/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
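
/*
 * Illustrative sketch, not part of this header: a caller setting up one
 * hardware queue's request map could pair the helpers above roughly like
 * this, assuming "set", "hctx_idx" and "depth" come from the tag set setup
 * path.
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
 *	if (!tags)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_map_and_rqs(set, tags, hctx_idx);
 */
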
/*
 * Internal helpers for request insertion into sw queues
 */
void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED)
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
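
/*
 * Illustrative sketch, not from this file: looking up the poll queue that
 * serves the current CPU might look like this, assuming "q" has a poll map
 * configured.
 *
 *	struct blk_mq_hw_ctx *hctx =
 *		blk_mq_map_queue_type(q, HCTX_TYPE_POLL, raw_smp_processor_id());
 */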

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
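
/*
 * Illustrative sketch, not part of this header: a submission path can chain
 * the two lookups above to find the hardware queue for a bio, assuming "q"
 * and "bio" are in scope.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 */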

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
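
/*
 * Illustrative sketch, not from this file: a dispatch path that honours
 * driver budgets would typically bracket a request like this, assuming "q"
 * and "rq" are in scope and the budget is returned on the failure path.
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return false;
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	...
 *	blk_mq_put_dispatch_budget(q, blk_mq_get_rq_budget_token(rq));
 */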

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}
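
/*
 * Illustrative sketch, not part of this header: dispatch code typically
 * wraps ->queue_rq() with these helpers, assuming "rq" already holds a
 * scheduler tag and the put only happens on failure/requeue paths.
 *
 *	if (!blk_mq_get_driver_tag(rq))
 *		return false;
 *	...
 *	blk_mq_put_driver_tag(rq);
 */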

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise.
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}
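
/*
 * Illustrative sketch, not from this file: callers are expected to go through
 * the helper above rather than dereference current->plug directly, so a NULL
 * return must be tolerated. A hypothetical caller:
 *
 *	struct blk_plug *plug = blk_mq_plug(bio);
 *
 *	if (plug)
 *		... attach the request to the plug ...
 *	else
 *		... insert/dispatch directly ...
 */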

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
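
/*
 * Worked example for the fair-share computation above, illustrative only:
 * with bt->sb.depth == 64 and users == 3, depth = max((64 + 3 - 1) / 3, 4U)
 * = max(22, 4) = 22, so this hctx may queue only while it has fewer than 22
 * active requests.
 */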

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set;	\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock(__tag_set->srcu);		\
		(dispatch_ops);						\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);		\
	} else {							\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)			\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)		\

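/*
 * Illustrative sketch, not part of this header: dispatch call sites wrap the
 * actual work in the macro so that BLK_MQ_F_BLOCKING drivers are covered by
 * SRCU and everyone else by plain RCU, e.g.
 *
 *	blk_mq_run_dispatch_ops(q,
 *			blk_mq_sched_dispatch_requests(hctx));
 */
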
#endif