// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when the tag set is shared by multiple
 * hardware queues.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
        if (!users)
                return;

        sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
                        users);
        sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
                        users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag; then, even if
 * the first attempt to get a tag fails, the other shared-tag users can
 * still reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        unsigned int users;
        struct blk_mq_tags *tags = hctx->tags;

        /*
         * Calling test_bit() prior to test_and_set_bit() is intentional:
         * it avoids dirtying the cacheline if the queue is already active.
         */
        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
                    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return;
        } else {
                if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
                    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
        }

        spin_lock_irq(&tags->lock);
        users = tags->active_queues + 1;
        WRITE_ONCE(tags->active_queues, users);
        blk_mq_update_wake_batch(tags, users);
        spin_unlock_irq(&tags->lock);
}
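
/*
 * Callers normally reach __blk_mq_tag_busy() through a wrapper that first
 * checks whether the tag set is shared at all. A minimal sketch of that
 * wrapper (the real one lives in blk-mq.h; the exact shape here is
 * illustrative):
 *
 *	static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 *	{
 *		if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
 *			__blk_mq_tag_busy(hctx);
 *	}
 */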

/*
 * Wake up everyone potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;
        unsigned int users;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
                                        &q->queue_flags))
                        return;
        } else {
                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
        }

        spin_lock_irq(&tags->lock);
        users = tags->active_queues - 1;
        WRITE_ONCE(tags->active_queues, users);
        blk_mq_update_wake_batch(tags, users);
        spin_unlock_irq(&tags->lock);

        blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
            !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;

        if (data->shallow_depth)
                return sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                              unsigned int *offset)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt = &tags->bitmap_tags;
        unsigned long ret;

        if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
            data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                return 0;
        ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
        *offset += tags->nr_reserved_tags;
        return ret;
}
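
/*
 * The return value is a bitmask of allocated bits and *offset is the common
 * base, so allocated tag i is (*offset + i). A consumption sketch, loosely
 * modelled on the batch allocation path in blk-mq.c (not a verbatim caller):
 *
 *	unsigned long tag_mask = blk_mq_get_tags(data, nr, &tag_offset);
 *
 *	for (i = 0; tag_mask; i++) {
 *		if (!(tag_mask & (1UL << i)))
 *			continue;
 *		tag = tag_offset + i;
 *		tag_mask &= ~(1UL << i);
 *		...
 *	}
 */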

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_NO_TAG;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != BLK_MQ_NO_TAG)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_NO_TAG;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, so kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                              data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If the destination hw queue changed, issue a fake wakeup
                 * on the previous queue to compensate for the missed wakeup,
                 * so other allocations on the previous queue won't be
                 * starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev, 1);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        /*
         * Give up this allocation if the hctx is inactive. The caller will
         * retry on an active hctx.
         */
        if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
                blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
                return BLK_MQ_NO_TAG;
        }
        return tag + tag_offset;
}
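
/*
 * A hedged usage note: BLK_MQ_NO_TAG can only come back to the caller when
 * BLK_MQ_REQ_NOWAIT was set or the hctx went inactive, so a caller pattern
 * looks roughly like this (illustrative, not a verbatim caller):
 *
 *	tag = blk_mq_get_tag(data);
 *	if (tag == BLK_MQ_NO_TAG)
 *		return NULL;	(NOWAIT set, or the hctx went inactive)
 */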

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                    unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
        sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
                                  tag_array, nr_tags);
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
                unsigned int bitnr)
{
        struct request *rq;
        unsigned long flags;

        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
        if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct request_queue *q = iter_data->q;
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_tags *tags;
        struct request *rq;
        bool ret = true;

        if (blk_mq_is_shared_tags(set->flags))
                tags = set->shared_tags;
        else
                tags = hctx->tags;

        if (!iter_data->reserved)
                bitnr += tags->nr_reserved_tags;
        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
                ret = iter_data->fn(rq, iter_data->data);
        blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
                        struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
                        void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
                .q = q,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        unsigned int flags;
};

#define BT_TAG_ITER_RESERVED	(1 << 0)
#define BT_TAG_ITER_STARTED	(1 << 1)
#define BT_TAG_ITER_STATIC_RQS	(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        struct request *rq;
        bool ret = true;
        bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

        if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (iter_static_rqs)
                rq = tags->static_rqs[bitnr];
        else
                rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
            blk_mq_request_started(rq))
                ret = iter_data->fn(rq, iter_data->data);
        if (!iter_static_rqs)
                blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .flags = flags,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
        WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
                                 flags | BT_TAG_ITER_RESERVED);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv)
 *		where rq is a pointer to a request. Return true to
 *		continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv)
{
        __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        unsigned int flags = tagset->flags;
        int i, nr_tags;

        nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

        for (i = 0; i < nr_tags; i++) {
                if (tagset->tags && tagset->tags[i])
                        __blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
                                              BT_TAG_ITER_STARTED);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
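
/*
 * A minimal sketch of a blk_mq_tagset_busy_iter() callback, assuming a
 * driver that aborts every in-flight request during teardown (the names
 * below are illustrative, not taken from a real driver). Returning true
 * continues the iteration:
 *
 *	static bool my_abort_rq(struct request *rq, void *data)
 *	{
 *		my_abort_cmd(blk_mq_rq_to_pdu(rq));
 *		return true;
 *	}
 *
 *	blk_mq_tagset_busy_iter(&ctrl->tag_set, my_abort_rq, NULL);
 */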

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
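
/*
 * A hedged usage sketch: during controller teardown a driver typically
 * cancels outstanding requests first and then waits for the resulting
 * completions to finish before freeing resources (the callback name is
 * illustrative):
 *
 *	blk_mq_tagset_busy_iter(&ctrl->tag_set, my_cancel_rq, NULL);
 *	blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
 */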

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv)
{
        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                struct blk_mq_tags *tags = q->tag_set->shared_tags;
                struct sbitmap_queue *bresv = &tags->breserved_tags;
                struct sbitmap_queue *btags = &tags->bitmap_tags;

                if (tags->nr_reserved_tags)
                        bt_for_each(NULL, q, bresv, fn, priv, true);
                bt_for_each(NULL, q, btags, fn, priv, false);
        } else {
                struct blk_mq_hw_ctx *hctx;
                unsigned long i;

                queue_for_each_hw_ctx(q, hctx, i) {
                        struct blk_mq_tags *tags = hctx->tags;
                        struct sbitmap_queue *bresv = &tags->breserved_tags;
                        struct sbitmap_queue *btags = &tags->bitmap_tags;

                        /*
                         * If no software queues are currently mapped to this
                         * hardware queue, there's nothing to check.
                         */
                        if (!blk_mq_hw_queue_mapped(hctx))
                                continue;

                        if (tags->nr_reserved_tags)
                                bt_for_each(hctx, q, bresv, fn, priv, true);
                        bt_for_each(hctx, q, btags, fn, priv, false);
                }
        }
        blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                        struct sbitmap_queue *breserved_tags,
                        unsigned int queue_depth, unsigned int reserved,
                        int node, int alloc_policy)
{
        unsigned int depth = queue_depth - reserved;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(bitmap_tags, depth, round_robin, node))
                return -ENOMEM;
        if (bt_alloc(breserved_tags, reserved, round_robin, node))
                goto free_bitmap_tags;

        return 0;

free_bitmap_tags:
        sbitmap_queue_free(bitmap_tags);
        return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        spin_lock_init(&tags->lock);

        if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
                                total_tags, reserved_tags, node,
                                alloc_policy) < 0) {
                kfree(tags);
                return NULL;
        }
        return tags;
}
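
/*
 * An illustrative call (a sketch, not lifted from a real caller): allocate a
 * 256-deep tag map with one reserved tag and FIFO tag allocation:
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
 *	if (!tags)
 *		return -ENOMEM;
 */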

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit; set it high enough that
                 * no valid use case should require more.
                 */
                if (tdepth > MAX_SCHED_RQ)
                        return -EINVAL;

                /*
                 * Only the sbitmap needs resizing since we allocated the max
                 * initially.
                 */
                if (blk_mq_is_shared_tags(set->flags))
                        return 0;

                new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
                if (!new)
                        return -ENOMEM;

                blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
                *tagsptr = new;
        } else {
                /*
                 * We don't need to (and can't) update reserved tags here;
                 * they remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
        struct blk_mq_tags *tags = set->shared_tags;

        sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
        sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
                             q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq:		request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
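
/*
 * A usage sketch: a driver (e.g. a SCSI LLD) can recover both halves of the
 * unique tag with the helpers in <linux/blk-mq.h>:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */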