// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#undef pr_fmt
#define pr_fmt(fmt) "null_blk: " fmt

#define FREE_BATCH 16

#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif
Jens Axboe93b57042018-01-10 09:06:23 -070027
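/*
 * Number of bytes of I/O allowed per bandwidth-throttling timer tick for a
 * limit of @mbps MiB/s.
 */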
static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED = 0,
	NULLB_DEV_FL_UP = 1,
	NULLB_DEV_FL_THROTTLED = 2,
	NULLB_DEV_FL_CACHE = 3,
};

#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sectors in the page have data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page has been
 * freed and should be skipped when flushing to storage. See
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE = 0,
	NULL_IRQ_SOFTIRQ = 1,
	NULL_IRQ_TIMER = 2,
};

static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No I/O scheduler");
87
static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_poll_queues = 1;
module_param_named(poll_queues, g_poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
 * For more details about fault injection, please refer to
 * Documentation/fault-injection/fault-injection.rst.
 */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");

static char g_init_hctx_str[80];
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif

static int g_queue_mode = NULL_Q_MQ;
Matias Bjorling709c8662014-11-26 14:45:48 -0700119
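/*
 * Parse a module parameter string as a base-10 integer and store it in @val
 * if it falls within [@min, @max]; return -EINVAL otherwise.
 */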
static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set = null_set_queue_mode,
	.get = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int g_max_sectors;
module_param_named(max_sectors, g_max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static bool g_shared_tag_bitmap;
module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set = null_set_irqmode,
	.get = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_memory_backed;
module_param_named(memory_backed, g_memory_backed, bool, 0444);
MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");

static bool g_discard;
module_param_named(discard, g_discard, bool, 0444);
MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");

static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
215
static unsigned int g_mbps;
module_param_named(mbps, g_mbps, uint, 0444);
MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, 0444);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
223
static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, 0444);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");
227
static unsigned long g_zone_capacity;
module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static unsigned int g_zone_max_open;
module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");

static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static struct nullb *null_find_dev_by_name(const char *name);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
static ssize_t \
nullb_device_##NAME##_show(struct config_item *item, char *page) \
{ \
	return nullb_device_##TYPE##_attr_show( \
				to_nullb_device(item)->NAME, page); \
} \
static ssize_t \
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count) \
{ \
	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
	struct nullb_device *dev = to_nullb_device(item); \
	TYPE new_value = 0; \
	int ret; \
	\
	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
	if (ret < 0) \
		return ret; \
	if (apply_fn) \
		ret = apply_fn(dev, new_value); \
	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
		ret = -EBUSY; \
	if (ret < 0) \
		return ret; \
	dev->NAME = new_value; \
	return count; \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
344
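/*
 * Apply a new submit/poll queue count to a live device. The previous counts
 * are kept in nullb_device so that null_map_queues() can reference them, and
 * are restored if blk_mq_update_nr_hw_queues() could not allocate the
 * requested number of hardware queues.
 */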
static int nullb_update_nr_hw_queues(struct nullb_device *dev,
				     unsigned int submit_queues,
				     unsigned int poll_queues)
{
	struct blk_mq_tag_set *set;
	int ret, nr_hw_queues;

	if (!dev->nullb)
		return 0;

	/*
	 * Make sure at least one submit queue exists.
	 */
	if (!submit_queues)
		return -EINVAL;

	/*
	 * Make sure that null_init_hctx() does not access nullb->queues[] past
	 * the end of that array.
	 */
	if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
		return -EINVAL;

	/*
	 * Keep previous and new queue numbers in nullb_device for reference in
	 * the callback function null_map_queues().
	 */
	dev->prev_submit_queues = dev->submit_queues;
	dev->prev_poll_queues = dev->poll_queues;
	dev->submit_queues = submit_queues;
	dev->poll_queues = poll_queues;

	set = dev->nullb->tag_set;
	nr_hw_queues = submit_queues + poll_queues;
	blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;

	if (ret) {
		/* on error, revert the queue numbers */
		dev->submit_queues = dev->prev_submit_queues;
		dev->poll_queues = dev->prev_poll_queues;
	}

	return ret;
}

static int nullb_apply_submit_queues(struct nullb_device *dev,
				     unsigned int submit_queues)
{
	return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
}

static int nullb_apply_poll_queues(struct nullb_device *dev,
				   unsigned int poll_queues)
{
	return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
}

NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}
435
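/*
 * Writing 1 to the power attribute creates and starts the device via
 * null_add_dev(); writing 0 stops and deletes it while keeping the configfs
 * item around for reconfiguration.
 */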
static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		ret = null_add_dev(dev);
		if (ret) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return ret;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}
479
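/*
 * Accepts "+<start>-<end>" to mark a sector range bad and "-<start>-<end>"
 * to clear it again. The first write enables badblocks tracking by flipping
 * badblocks.shift from -1 to 0.
 */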
static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

static ssize_t nullb_device_zone_readonly_store(struct config_item *item,
						const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);

	return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY);
}
CONFIGFS_ATTR_WO(nullb_device_, zone_readonly);

static ssize_t nullb_device_zone_offline_store(struct config_item *item,
					       const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);

	return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE);
}
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_poll_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_max_sectors,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_capacity,
	&nullb_device_attr_zone_nr_conv,
	&nullb_device_attr_zone_max_open,
	&nullb_device_attr_zone_max_active,
	&nullb_device_attr_zone_readonly,
	&nullb_device_attr_zone_offline,
	&nullb_device_attr_virt_boundary,
	&nullb_device_attr_no_sched,
	&nullb_device_attr_shared_tag_bitmap,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release = nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops = &nullb_device_ops,
	.ct_attrs = nullb_device_attrs,
	.ct_owner = THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	if (null_find_dev_by_name(name))
		return ERR_PTR(-EEXIST);

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE,
			"badblocks,blocking,blocksize,cache_size,"
			"completion_nsec,discard,home_node,hw_queue_depth,"
			"irqmode,max_sectors,mbps,memory_backed,no_sched,"
			"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
			"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
			"zone_capacity,zone_max_active,zone_max_open,"
			"zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item = nullb_group_make_item,
	.drop_item = nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops = &nullb_group_ops,
	.ct_attrs = nullb_group_attrs,
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->prev_submit_queues = g_submit_queues;
	dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = g_poll_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->max_sectors = g_max_sectors;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->memory_backed = g_memory_backed;
	dev->discard = g_discard;
	dev->cache_size = g_cache_size;
	dev->mbps = g_mbps;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_capacity = g_zone_capacity;
	dev->zone_nr_conv = g_zone_nr_conv;
	dev->zone_max_open = g_zone_max_open;
	dev->zone_max_active = g_zone_max_active;
	dev->virt_boundary = g_virt_boundary;
	dev->no_sched = g_no_sched;
	dev->shared_tag_bitmap = g_shared_tag_bitmap;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_free_zoned_dev(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}
725
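/*
 * Tag handling for the bio-based (NULL_Q_BIO) mode: tags come from a plain
 * per-queue bitmap, and get_tag() returns -1U when the queue is full.
 */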
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->error = BLK_STS_OK;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	do {
		/*
		 * This avoids multiple return statements, multiple calls to
		 * __alloc_cmd() and a fast path call to prepare_to_wait().
		 */
		cmd = __alloc_cmd(nq);
		if (cmd) {
			cmd->bio = bio;
			return cmd;
		}
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		io_schedule();
		finish_wait(&nq->wait, &wait);
	} while (1);
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}
832
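/*
 * Allocate a nullb_page and its backing page. GFP_NOIO is used because this
 * runs in the I/O path, where recursing into the block layer during memory
 * reclaim must be avoided.
 */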
static struct nullb_page *null_alloc_page(void)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
	if (!t_page)
		return NULL;

	t_page->page = alloc_pages(GFP_NOIO, 0);
	if (!t_page->page) {
		kfree(t_page);
		return NULL;
	}

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}
859
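/* True if no data bits are set; the LOCK and FREE flag bits are excluded. */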
static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}
866
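/*
 * Drop one sector's worth of data from the data store (or from the cache
 * when @is_cache is true). A page that becomes completely empty is removed
 * from the radix tree and freed.
 */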
static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}
909
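/*
 * Free all pages of either the data store or the cache, walking the radix
 * tree in FREE_BATCH-sized gang lookups.
 */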
static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}
971
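/*
 * Look up the page backing @sector, allocating and inserting one if it does
 * not exist yet. nullb->lock is dropped while allocating and preloading the
 * radix tree, which is why the lookup is repeated after the lock is
 * re-taken.
 */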
static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page();
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}
1006
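/*
 * Write one cache page back to the data store. If the page was marked FREE
 * while it was being flushed, it is discarded instead; otherwise the valid
 * sectors are copied over, the page is removed from the cache tree and the
 * cache usage accounting is updated.
 */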
static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}
1055
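/*
 * Flush cache pages until at least @n more bytes fit within the configured
 * cache size. The LOCK bit marks pages that are being flushed so that they
 * are not freed concurrently.
 */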
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow page free.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip a page that is already being flushed to disk by
		 * another thread.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}
1108
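/*
 * Copy @n bytes from @source (starting at @off) into the device at @sector,
 * one block at a time. Non-FUA writes land in the cache when it is active;
 * FUA writes go straight to the data store and drop any cached copy.
 */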
static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}
1145
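/*
 * Copy @n bytes from the device at @sector into @dest (starting at @off).
 * Sectors that were never written read back as zeroes.
 */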
static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
			       unsigned int len, unsigned int off)
{
	void *dst;

	dst = kmap_atomic(page);
	memset(dst + off, 0xFF, len);
	kunmap_atomic(dst);
}

blk_status_t null_handle_discard(struct nullb_device *dev,
				 sector_t sector, sector_t nr_sectors)
{
	struct nullb *nullb = dev->nullb;
	size_t n = nr_sectors << SECTOR_SHIFT;
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);

	return BLK_STS_OK;
}
1208
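/* Write the entire write-back cache out to the backing data store. */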
static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	struct nullb_device *dev = nullb->dev;
	unsigned int valid_len = len;
	int err = 0;

	if (!is_write) {
		if (dev->zoned)
			valid_len = null_zone_valid_read_len(nullb,
				sector, len);

		if (valid_len) {
			err = copy_from_nullb(nullb, page, off,
				sector, valid_len);
			off += valid_len;
			len -= valid_len;
		}

		if (len)
			nullb_fill_pattern(nullb, page, len, off);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector = blk_rq_pos(rq);
	struct req_iterator iter;
	struct bio_vec bvec;

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector = bio->bi_iter.bi_sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}
1328
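/*
 * Charge the request's byte count against the cur_bytes bandwidth budget.
 * When the budget is exhausted, stop the hardware queues and requeue the
 * request with BLK_STS_DEV_RESOURCE; the bw_timer handler refills the budget
 * and restarts the queues.
 */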
Chaitanya Kulkarniadb84282019-08-22 21:45:15 -07001329static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
1330{
1331 struct nullb_device *dev = cmd->nq->dev;
1332 struct nullb *nullb = dev->nullb;
1333 blk_status_t sts = BLK_STS_OK;
1334 struct request *rq = cmd->rq;
1335
1336 if (!hrtimer_active(&nullb->bw_timer))
1337 hrtimer_restart(&nullb->bw_timer);
1338
1339 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1340 null_stop_queue(nullb);
1341 /* race with timer */
1342 if (atomic_long_read(&nullb->cur_bytes) > 0)
1343 null_restart_queue_async(nullb);
1344 /* requeue request */
1345 sts = BLK_STS_DEV_RESOURCE;
1346 }
1347 return sts;
1348}
1349
Chaitanya Kulkarni8f94d1c2019-08-22 21:45:16 -07001350static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
1351 sector_t sector,
1352 sector_t nr_sectors)
1353{
1354 struct badblocks *bb = &cmd->nq->dev->badblocks;
1355 sector_t first_bad;
1356 int bad_sectors;
1357
1358 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1359 return BLK_STS_IOERR;
1360
1361 return BLK_STS_OK;
1362}
1363
static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
						     enum req_op op,
						     sector_t sector,
						     sector_t nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	int err;

	if (op == REQ_OP_DISCARD)
		return null_handle_discard(dev, sector, nr_sectors);

	if (dev->queue_mode == NULL_Q_BIO)
		err = null_handle_bio(cmd);
	else
		err = null_handle_rq(cmd);

	return errno_to_blk_status(err);
}

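/*
 * Zero-fill the data buffer of a read command on devices without memory
 * backing, which otherwise complete reads without touching the buffer.
 */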
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct bio *bio;

	if (dev->memory_backed)
		return;

	if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
		zero_fill_bio(cmd->bio);
	} else if (req_op(cmd->rq) == REQ_OP_READ) {
		__rq_for_each_bio(bio, cmd->rq)
			zero_fill_bio(bio);
	}
}

static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
	/*
	 * Since root privileges are required to configure the null_blk
	 * driver, it is fine that this driver does not initialize the
	 * data buffers of read commands. Zero-initialize these buffers
	 * anyway if KMSAN is enabled to prevent that KMSAN complains
	 * about null_blk not initializing read data buffers.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		nullb_zero_read_cmd_buffer(cmd);

	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (cmd->nq->dev->queue_mode) {
		case NULL_Q_MQ:
			if (likely(!blk_should_fake_timeout(cmd->rq->q)))
				blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

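/*
 * Common processing for non-zoned commands: check bad blocks first, then
 * hand the command to the memory backing store if one is configured.
 * Not static because the zoned code calls it as well.
 */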
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	blk_status_t ret;

	if (dev->badblocks.shift != -1) {
		ret = null_handle_badblocks(cmd, sector, nr_sectors);
		if (ret != BLK_STS_OK)
			return ret;
	}

	if (dev->memory_backed)
		return null_handle_memory_backed(cmd, op, sector, nr_sectors);

	return BLK_STS_OK;
}

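/*
 * Top-level command handler shared by the bio and blk-mq paths: apply
 * bandwidth throttling, handle flushes, then process the command through
 * the zoned or regular path and complete it.
 */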
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
				    sector_t nr_sectors, enum req_op op)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		sts = null_handle_throttled(cmd);
		if (sts != BLK_STS_OK)
			return sts;
	}

	if (op == REQ_OP_FLUSH) {
		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
		goto out;
	}

	if (dev->zoned)
		sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
	else
		sts = null_process_cmd(cmd, op, sector, nr_sectors);

	/* Do not overwrite errors (e.g. timeout errors) */
	if (cmd->error == BLK_STS_OK)
		cmd->error = sts;

out:
	nullb_complete_cmd(cmd);
	return BLK_STS_OK;
}

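/*
 * Bandwidth timer: once per tick, refill the byte budget to mb_per_tick()
 * and restart the stopped queues. The timer stops itself once a full tick
 * passes with no bytes consumed.
 */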
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

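/*
 * Map the submitting CPU to one of the nullb queues by dividing the CPU id
 * space evenly across the configured queues. Used by the bio path only;
 * blk-mq does its own queue mapping.
 */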
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

static void null_submit_bio(struct bio *bio)
{
	sector_t sector = bio->bi_iter.bi_sector;
	sector_t nr_sectors = bio_sectors(bio);
	struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
	struct nullb_queue *nq = nullb_to_queue(nullb);

	null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
}

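/*
 * Fault injection hooks, enabled through the timeout/requeue module
 * parameters when CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is set.
 */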
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static void null_map_queues(struct blk_mq_tag_set *set)
{
	struct nullb *nullb = set->driver_data;
	int i, qoff;
	unsigned int submit_queues = g_submit_queues;
	unsigned int poll_queues = g_poll_queues;

	if (nullb) {
		struct nullb_device *dev = nullb->dev;

		/*
		 * Check nr_hw_queues of the tag set to see whether the
		 * expected number of hardware queues was prepared. If the
		 * block layer failed to prepare them, fall back to the
		 * previous numbers of submit queues and poll queues for
		 * the mapping.
		 */
		if (set->nr_hw_queues ==
		    dev->submit_queues + dev->poll_queues) {
			submit_queues = dev->submit_queues;
			poll_queues = dev->poll_queues;
		} else if (set->nr_hw_queues ==
			   dev->prev_submit_queues + dev->prev_poll_queues) {
			submit_queues = dev->prev_submit_queues;
			poll_queues = dev->prev_poll_queues;
		} else {
			pr_warn("tag set has unexpected nr_hw_queues: %d\n",
				set->nr_hw_queues);
			WARN_ON_ONCE(true);
			submit_queues = 1;
			poll_queues = 0;
		}
	}

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		switch (i) {
		case HCTX_TYPE_DEFAULT:
			map->nr_queues = submit_queues;
			break;
		case HCTX_TYPE_READ:
			map->nr_queues = 0;
			continue;
		case HCTX_TYPE_POLL:
			map->nr_queues = poll_queues;
			break;
		}
		map->queue_offset = qoff;
		qoff += map->nr_queues;
		blk_mq_map_queues(map);
	}
}

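/*
 * Poll callback: drain the per-queue poll list and process each request
 * synchronously, completing requests in a batch where possible.
 */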
static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nullb_queue *nq = hctx->driver_data;
	LIST_HEAD(list);
	int nr = 0;

	spin_lock(&nq->poll_lock);
	list_splice_init(&nq->poll_list, &list);
	spin_unlock(&nq->poll_lock);

	while (!list_empty(&list)) {
		struct nullb_cmd *cmd;
		struct request *req;

		req = list_first_entry(&list, struct request, queuelist);
		list_del_init(&req->queuelist);
		cmd = blk_mq_rq_to_pdu(req);
		cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
					      blk_rq_sectors(req));
		if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
					 blk_mq_end_request_batch))
			end_cmd(cmd);
		nr++;
	}

	return nr;
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	pr_info("rq %p timed out\n", rq);

	if (hctx->type == HCTX_TYPE_POLL) {
		struct nullb_queue *nq = hctx->driver_data;

		spin_lock(&nq->poll_lock);
		list_del_init(&rq->queuelist);
		spin_unlock(&nq->poll_lock);
	}

	/*
	 * If the device is marked as blocking (i.e. memory backed or zoned
	 * device), the submission path may be blocked waiting for resources
	 * and cause real timeouts. For these real timeouts, the submission
	 * path will complete the request using blk_mq_complete_request().
	 * Only fake timeouts need to execute blk_mq_complete_request() here.
	 */
	cmd->error = BLK_STS_TIMEOUT;
	if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
		blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

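/*
 * blk-mq ->queue_rq() handler: set up the command, apply the requeue and
 * timeout fault injection, park poll requests on the poll list, and hand
 * everything else to null_handle_cmd().
 */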
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;
	sector_t nr_sectors = blk_rq_sectors(bd->rq);
	sector_t sector = blk_rq_pos(bd->rq);
	const bool is_poll = hctx->type == HCTX_TYPE_POLL;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->error = BLK_STS_OK;
	cmd->nq = nq;
	cmd->fake_timeout = should_timeout_request(bd->rq);

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}

	if (is_poll) {
		spin_lock(&nq->poll_lock);
		list_add_tail(&bd->rq->queuelist, &nq->poll_list);
		spin_unlock(&nq->poll_lock);
		return BLK_STS_OK;
	}
	if (cmd->fake_timeout)
		return BLK_STS_OK;

	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
}

static void cleanup_queue(struct nullb_queue *nq)
{
	bitmap_free(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nullb_queue *nq = hctx->driver_data;
	struct nullb *nullb = nq->dev->nullb;

	nullb->nr_queues--;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
	INIT_LIST_HEAD(&nq->poll_list);
	spin_lock_init(&nq->poll_lock);
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			  unsigned int hctx_idx)
{
	struct nullb *nullb = hctx->queue->queuedata;
	struct nullb_queue *nq;

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1))
		return -EFAULT;
#endif

	nq = &nullb->queues[hctx_idx];
	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
	.poll		= null_poll,
	.map_queues	= null_map_queues,
	.init_hctx	= null_init_hctx,
	.exit_hctx	= null_exit_hctx,
};

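/*
 * Tear down a nullb device: drop it from the global list, remove the disk,
 * cancel bandwidth throttling, and release the tag set, queues and any
 * cached backing pages.
 */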
static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev;

	if (!nullb)
		return;

	dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	put_disk(nullb->disk);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (!nullb->dev->discard)
		return;

	if (!nullb->dev->memory_backed) {
		nullb->dev->discard = false;
		pr_info("discard option is ignored without memory backing\n");
		return;
	}

	if (nullb->dev->zoned) {
		nullb->dev->discard = false;
		pr_info("discard option is ignored in zoned mode\n");
		return;
	}

	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
}

static const struct block_device_operations null_bio_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= null_submit_bio,
	.report_zones	= null_report_zones,
};

static const struct block_device_operations null_rq_ops = {
	.owner		= THIS_MODULE,
	.report_zones	= null_report_zones,
};

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	int nqueues = nr_cpu_ids;

	if (g_poll_queues)
		nqueues += g_poll_queues;

	nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->queue_depth = nullb->dev->hw_queue_depth;
	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
	struct gendisk *disk = nullb->disk;

	set_capacity(disk, size);

	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->minors = 1;
	if (queue_is_mq(nullb->q))
		disk->fops = &null_rq_ops;
	else
		disk->fops = &null_bio_ops;
	disk->private_data = nullb;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = null_register_zoned_dev(nullb);

		if (ret)
			return ret;
	}

	return add_disk(disk);
}

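/*
 * Fill in and allocate a blk-mq tag set, taking the configuration either
 * from the given device or, for the module-wide shared tag set, from the
 * global module parameters (nullb == NULL).
 */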
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
	int hw_queues, numa_node;
	unsigned int queue_depth;
	int poll_queues;

	if (nullb) {
		hw_queues = nullb->dev->submit_queues;
		poll_queues = nullb->dev->poll_queues;
		queue_depth = nullb->dev->hw_queue_depth;
		numa_node = nullb->dev->home_node;
		if (nullb->dev->no_sched)
			flags |= BLK_MQ_F_NO_SCHED;
		if (nullb->dev->shared_tag_bitmap)
			flags |= BLK_MQ_F_TAG_HCTX_SHARED;
		if (nullb->dev->blocking)
			flags |= BLK_MQ_F_BLOCKING;
	} else {
		hw_queues = g_submit_queues;
		poll_queues = g_poll_queues;
		queue_depth = g_hw_queue_depth;
		numa_node = g_home_node;
		if (g_no_sched)
			flags |= BLK_MQ_F_NO_SCHED;
		if (g_shared_tag_bitmap)
			flags |= BLK_MQ_F_TAG_HCTX_SHARED;
		if (g_blocking)
			flags |= BLK_MQ_F_BLOCKING;
	}

	set->ops = &null_mq_ops;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->flags = flags;
	set->driver_data = nullb;
	set->nr_hw_queues = hw_queues;
	set->queue_depth = queue_depth;
	set->numa_node = numa_node;
	if (poll_queues) {
		set->nr_hw_queues += poll_queues;
		set->nr_maps = 3;
	} else {
		set->nr_maps = 1;
	}

	return blk_mq_alloc_tag_set(set);
}

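/*
 * Clamp and sanity-check a device configuration before it is turned on:
 * round the block size, bound the queue counts, force blocking mode for
 * memory-backed devices, and reject invalid zone sizes.
 */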
static int null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;
	dev->prev_submit_queues = dev->submit_queues;

	if (dev->poll_queues > g_poll_queues)
		dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = dev->poll_queues;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
				dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;

	if (dev->zoned &&
	    (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
	if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
		return false;
#endif
	return true;
}

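/*
 * Create and register a nullb device from a validated configuration:
 * allocate the queues and disk, apply the throttling, cache, zoned and
 * discard options, assign an index, and finally add the gendisk.
 */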
static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	rv = null_validate_conf(dev);
	if (rv)
		return rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		/* Do not return 0 when fault setup fails */
		if (!null_setup_fault()) {
			rv = -EINVAL;
			goto out_cleanup_tags;
		}

		nullb->tag_set->timeout = 5 * HZ;
		nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
		if (IS_ERR(nullb->disk)) {
			rv = PTR_ERR(nullb->disk);
			goto out_cleanup_tags;
		}
		nullb->q = nullb->disk->queue;
	} else if (dev->queue_mode == NULL_Q_BIO) {
		rv = -ENOMEM;
		nullb->disk = blk_alloc_disk(nullb->dev->home_node);
		if (!nullb->disk)
			goto out_cleanup_queues;

		nullb->q = nullb->disk->queue;
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_disk;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_init_zoned_dev(dev, nullb->q);
		if (rv)
			goto out_cleanup_disk;
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	if (rv < 0) {
		mutex_unlock(&lock);
		goto out_cleanup_zone;
	}
	nullb->index = rv;
	dev->index = rv;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);
	if (!dev->max_sectors)
		dev->max_sectors = queue_max_hw_sectors(nullb->q);
	dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
	blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);

	if (dev->virt_boundary)
		blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);

	null_config_discard(nullb);

	if (config_item_name(&dev->item)) {
		/* Use configfs dir name as the device name */
		snprintf(nullb->disk_name, sizeof(nullb->disk_name),
			 "%s", config_item_name(&dev->item));
	} else {
		sprintf(nullb->disk_name, "nullb%d", nullb->index);
	}

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_ida_free;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	pr_info("disk %s created\n", nullb->disk_name);

	return 0;

out_ida_free:
	ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
	null_free_zoned_dev(dev);
out_cleanup_disk:
	put_disk(nullb->disk);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
	dev->nullb = NULL;
out:
	return rv;
}

static struct nullb *null_find_dev_by_name(const char *name)
{
	struct nullb *nullb = NULL, *nb;

	mutex_lock(&lock);
	list_for_each_entry(nb, &nullb_list, list) {
		if (strcmp(nb->disk_name, name) == 0) {
			nullb = nb;
			break;
		}
	}
	mutex_unlock(&lock);

	return nullb;
}

static int null_create_dev(void)
{
	struct nullb_device *dev;
	int ret;

	dev = null_alloc_dev();
	if (!dev)
		return -ENOMEM;

	ret = null_add_dev(dev);
	if (ret) {
		null_free_dev(dev);
		return ret;
	}

	return 0;
}

static void null_destroy_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	null_del_dev(nullb);
	null_free_dev(dev);
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;

	if (g_bs > PAGE_SIZE) {
		pr_warn("invalid block size\n");
		pr_warn("defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
		pr_warn("invalid max sectors\n");
		pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
		g_max_sectors = BLK_DEF_MAX_SECTORS;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path is no longer available\n");
		return -EINVAL;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("submit_queues param is set to %u.\n",
				nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids) {
		g_submit_queues = nr_cpu_ids;
	} else if (g_submit_queues <= 0) {
		g_submit_queues = 1;
	}

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		ret = null_create_dev();
		if (ret)
			goto err_dev;
	}

	pr_info("module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_destroy_dev(nullb);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_destroy_dev(nullb);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");