/*
 * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
 * Copyright (C) 2006-2009 NEC Corporation.
 *
 * dm-queue-length.c
 *
 * Module Author: Stefan Bader, IBM
 * Modified by: Kiyoshi Ueda, NEC
 *
 * This file is released under the GPL.
 *
 * queue-length path selector - choose a path with the least number of
 * in-flight I/Os.
 */
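
/*
 * Example (illustrative only, assuming the standard dm-mpath table syntax;
 * the device name, numbers and length below are made up):
 *
 *   dmsetup create mpath_ql --table \
 *     "0 10 multipath 0 0 1 1 queue-length 0 2 1 8:16 1 8:32 1"
 *
 * "queue-length 0 2 1" reads: selector "queue-length", no selector
 * arguments, two paths, one argument per path (the optional
 * <repeat_count>, here 1 for both 8:16 and 8:32).
 */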

#include "dm.h"
#include "dm-path-selector.h"

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX	"multipath queue-length"
#define QL_MIN_IO	1
#define QL_VERSION	"0.2.0"

struct selector {
	struct list_head	valid_paths;
	struct list_head	failed_paths;
	spinlock_t		lock;
};

struct path_info {
	struct list_head	list;
	struct dm_path		*path;
	unsigned		repeat_count;
	atomic_t		qlen;	/* the number of in-flight I/Os */
};

static struct selector *alloc_selector(void)
{
	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s) {
		INIT_LIST_HEAD(&s->valid_paths);
		INIT_LIST_HEAD(&s->failed_paths);
		spin_lock_init(&s->lock);
	}

	return s;
}

static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
{
	struct selector *s = alloc_selector();

	if (!s)
		return -ENOMEM;

	ps->context = s;
	return 0;
}

static void ql_free_paths(struct list_head *paths)
{
	struct path_info *pi, *next;

	list_for_each_entry_safe(pi, next, paths, list) {
		list_del(&pi->list);
		kfree(pi);
	}
}

static void ql_destroy(struct path_selector *ps)
{
	struct selector *s = ps->context;

	ql_free_paths(&s->valid_paths);
	ql_free_paths(&s->failed_paths);
	kfree(s);
	ps->context = NULL;
}

static int ql_status(struct path_selector *ps, struct dm_path *path,
		     status_type_t type, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct path_info *pi;

	/* When called with NULL path, return selector status/args. */
	if (!path)
		DMEMIT("0 ");
	else {
		pi = path->pscontext;

		switch (type) {
		case STATUSTYPE_INFO:
			DMEMIT("%d ", atomic_read(&pi->qlen));
			break;
		case STATUSTYPE_TABLE:
			DMEMIT("%u ", pi->repeat_count);
			break;
		}
	}

	return sz;
}

static int ql_add_path(struct path_selector *ps, struct dm_path *path,
		       int argc, char **argv, char **error)
{
	struct selector *s = ps->context;
	struct path_info *pi;
	unsigned repeat_count = QL_MIN_IO;
	char dummy;
	unsigned long flags;

	/*
	 * Arguments: [<repeat_count>]
	 *	<repeat_count>: The number of I/Os before switching path.
	 *			If not given, default (QL_MIN_IO) is used.
	 */
	if (argc > 1) {
		*error = "queue-length ps: incorrect number of arguments";
		return -EINVAL;
	}

	if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
		*error = "queue-length ps: invalid repeat count";
		return -EINVAL;
	}

	if (repeat_count > 1) {
		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
		repeat_count = 1;
	}

	/* Allocate the path information structure */
	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
	if (!pi) {
		*error = "queue-length ps: Error allocating path information";
		return -ENOMEM;
	}

	pi->path = path;
	pi->repeat_count = repeat_count;
	atomic_set(&pi->qlen, 0);

	path->pscontext = pi;

	spin_lock_irqsave(&s->lock, flags);
	list_add_tail(&pi->list, &s->valid_paths);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
{
	struct selector *s = ps->context;
	struct path_info *pi = path->pscontext;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	list_move(&pi->list, &s->failed_paths);
	spin_unlock_irqrestore(&s->lock, flags);
}

static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
	struct selector *s = ps->context;
	struct path_info *pi = path->pscontext;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	list_move_tail(&pi->list, &s->valid_paths);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

/*
 * Select a path having the minimum number of in-flight I/Os
 */
static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
{
	struct selector *s = ps->context;
	struct path_info *pi = NULL, *best = NULL;
	struct dm_path *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (list_empty(&s->valid_paths))
		goto out;

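	/*
	 * Scan for the path with the fewest in-flight I/Os; a completely
	 * idle path (qlen == 0) cannot be beaten, so stop early.
	 */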
	list_for_each_entry(pi, &s->valid_paths, list) {
		if (!best ||
		    (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
			best = pi;

		if (!atomic_read(&best->qlen))
			break;
	}

	if (!best)
		goto out;

	/* Move most recently used to least preferred to evenly balance. */
	list_move_tail(&best->list, &s->valid_paths);

	ret = best->path;
out:
	spin_unlock_irqrestore(&s->lock, flags);
	return ret;
}

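/*
 * Dispatch-side accounting: dm-mpath calls start_io once a path has been
 * chosen for an I/O, so qlen tracks the number of I/Os in flight on it.
 */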
static int ql_start_io(struct path_selector *ps, struct dm_path *path,
		       size_t nr_bytes)
{
	struct path_info *pi = path->pscontext;

	atomic_inc(&pi->qlen);

	return 0;
}

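/* Completion-side accounting: drop the in-flight count for @path. */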
static int ql_end_io(struct path_selector *ps, struct dm_path *path,
		     size_t nr_bytes)
{
	struct path_info *pi = path->pscontext;

	atomic_dec(&pi->qlen);

	return 0;
}

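/*
 * Selector registration; table_args/info_args are the number of per-path
 * fields ql_status() emits for STATUSTYPE_TABLE and STATUSTYPE_INFO.
 */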
static struct path_selector_type ql_ps = {
	.name		= "queue-length",
	.module		= THIS_MODULE,
	.table_args	= 1,
	.info_args	= 1,
	.create		= ql_create,
	.destroy	= ql_destroy,
	.status		= ql_status,
	.add_path	= ql_add_path,
	.fail_path	= ql_fail_path,
	.reinstate_path	= ql_reinstate_path,
	.select_path	= ql_select_path,
	.start_io	= ql_start_io,
	.end_io		= ql_end_io,
};

static int __init dm_ql_init(void)
{
	int r = dm_register_path_selector(&ql_ps);

	if (r < 0)
		DMERR("register failed %d", r);

	DMINFO("version " QL_VERSION " loaded");

	return r;
}

static void __exit dm_ql_exit(void)
{
	int r = dm_unregister_path_selector(&ql_ps);

	if (r < 0)
		DMERR("unregister failed %d", r);
}

module_init(dm_ql_init);
module_exit(dm_ql_exit);

MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
MODULE_DESCRIPTION(
	"(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n"
	DM_NAME " path selector to balance the number of in-flight I/Os"
);
MODULE_LICENSE("GPL");