// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
 * Copyright (C) 2006-2009 NEC Corporation.
 *
 * dm-queue-length.c
 *
 * Module Author: Stefan Bader, IBM
 * Modified by: Kiyoshi Ueda, NEC
 *
 * This file is released under the GPL.
 *
 * queue-length path selector - choose a path with the least number of
 * in-flight I/Os.
 */
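
/*
 * Illustrative use (a sketch, not part of this file): a dm-multipath
 * table line that picks this selector for one priority group of two
 * paths.  The target length (10240000), the device numbers (8:0, 8:16)
 * and the per-path <repeat_count> argument (1) are placeholders:
 *
 *   dmsetup create mpath --table \
 *	"0 10240000 multipath 0 0 1 1 queue-length 0 2 1 8:0 1 8:16 1"
 */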

#include "dm.h"
#include "dm-path-selector.h"

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX	"multipath queue-length"
#define QL_MIN_IO	1
#define QL_VERSION	"0.2.0"

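/* Per-selector state: the usable and failed path lists, guarded by ->lock. */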
struct selector {
	struct list_head	valid_paths;
	struct list_head	failed_paths;
	spinlock_t lock;
};

struct path_info {
	struct list_head	list;
	struct dm_path		*path;
	unsigned int		repeat_count;
	atomic_t		qlen;	/* the number of in-flight I/Os */
};

static struct selector *alloc_selector(void)
{
	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s) {
		INIT_LIST_HEAD(&s->valid_paths);
		INIT_LIST_HEAD(&s->failed_paths);
		spin_lock_init(&s->lock);
	}

	return s;
}

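/* Selector constructor: allocate per-selector state; argc/argv are unused. */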
static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
{
	struct selector *s = alloc_selector();

	if (!s)
		return -ENOMEM;

	ps->context = s;
	return 0;
}

static void ql_free_paths(struct list_head *paths)
{
	struct path_info *pi, *next;

	list_for_each_entry_safe(pi, next, paths, list) {
		list_del(&pi->list);
		kfree(pi);
	}
}

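/* Selector destructor: free any remaining path_info entries, then the state. */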
static void ql_destroy(struct path_selector *ps)
{
	struct selector *s = ps->context;

	ql_free_paths(&s->valid_paths);
	ql_free_paths(&s->failed_paths);
	kfree(s);
	ps->context = NULL;
}

static int ql_status(struct path_selector *ps, struct dm_path *path,
		     status_type_t type, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct path_info *pi;

	/* When called with NULL path, return selector status/args. */
	if (!path)
		DMEMIT("0 ");
	else {
		pi = path->pscontext;

		switch (type) {
		case STATUSTYPE_INFO:
			DMEMIT("%d ", atomic_read(&pi->qlen));
			break;
		case STATUSTYPE_TABLE:
			DMEMIT("%u ", pi->repeat_count);
			break;
		case STATUSTYPE_IMA:
			*result = '\0';
			break;
		}
	}

	return sz;
}

static int ql_add_path(struct path_selector *ps, struct dm_path *path,
		       int argc, char **argv, char **error)
{
	struct selector *s = ps->context;
	struct path_info *pi;
	unsigned int repeat_count = QL_MIN_IO;
	char dummy;
	unsigned long flags;

	/*
	 * Arguments: [<repeat_count>]
	 *	<repeat_count>: The number of I/Os before switching path.
	 *			If not given, default (QL_MIN_IO) is used.
	 */
	if (argc > 1) {
		*error = "queue-length ps: incorrect number of arguments";
		return -EINVAL;
	}

	if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
		*error = "queue-length ps: invalid repeat count";
		return -EINVAL;
	}

	if (repeat_count > 1) {
		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
		repeat_count = 1;
	}

	/* Allocate the path information structure */
	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
	if (!pi) {
		*error = "queue-length ps: Error allocating path information";
		return -ENOMEM;
	}

	pi->path = path;
	pi->repeat_count = repeat_count;
	atomic_set(&pi->qlen, 0);

	path->pscontext = pi;

	spin_lock_irqsave(&s->lock, flags);
	list_add_tail(&pi->list, &s->valid_paths);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

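/* Take a failed path out of the rotation until it is reinstated. */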
static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
{
	struct selector *s = ps->context;
	struct path_info *pi = path->pscontext;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	list_move(&pi->list, &s->failed_paths);
	spin_unlock_irqrestore(&s->lock, flags);
}

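/* Return a recovered path to the tail of the valid list. */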
static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
	struct selector *s = ps->context;
	struct path_info *pi = path->pscontext;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	list_move_tail(&pi->list, &s->valid_paths);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

/*
 * Select a path having the minimum number of in-flight I/Os
 */
static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
{
	struct selector *s = ps->context;
	struct path_info *pi = NULL, *best = NULL;
	struct dm_path *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (list_empty(&s->valid_paths))
		goto out;

	list_for_each_entry(pi, &s->valid_paths, list) {
		if (!best ||
		    (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
			best = pi;

		if (!atomic_read(&best->qlen))
			break;
	}

	if (!best)
		goto out;

	/* Move most recently used to least preferred to evenly balance. */
	list_move_tail(&best->list, &s->valid_paths);

	ret = best->path;
out:
	spin_unlock_irqrestore(&s->lock, flags);
	return ret;
}

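/* An I/O has been dispatched to this path: count it as in flight. */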
static int ql_start_io(struct path_selector *ps, struct dm_path *path,
		       size_t nr_bytes)
{
	struct path_info *pi = path->pscontext;

	atomic_inc(&pi->qlen);

	return 0;
}

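/* An I/O has completed on this path: drop it from the in-flight count. */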
static int ql_end_io(struct path_selector *ps, struct dm_path *path,
		     size_t nr_bytes, u64 start_time)
{
	struct path_info *pi = path->pscontext;

	atomic_dec(&pi->qlen);

	return 0;
}

static struct path_selector_type ql_ps = {
	.name		= "queue-length",
	.module		= THIS_MODULE,
	.table_args	= 1,
	.info_args	= 1,
	.create		= ql_create,
	.destroy	= ql_destroy,
	.status		= ql_status,
	.add_path	= ql_add_path,
	.fail_path	= ql_fail_path,
	.reinstate_path	= ql_reinstate_path,
	.select_path	= ql_select_path,
	.start_io	= ql_start_io,
	.end_io		= ql_end_io,
};

static int __init dm_ql_init(void)
{
	int r = dm_register_path_selector(&ql_ps);

	if (r < 0)
		DMERR("register failed %d", r);

	DMINFO("version " QL_VERSION " loaded");

	return r;
}

static void __exit dm_ql_exit(void)
{
	int r = dm_unregister_path_selector(&ql_ps);

	if (r < 0)
		DMERR("unregister failed %d", r);
}

module_init(dm_ql_init);
module_exit(dm_ql_exit);

MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
MODULE_DESCRIPTION(
	"(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n"
	DM_NAME " path selector to balance the number of in-flight I/Os"
);
MODULE_LICENSE("GPL");