/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;
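/*
 * Illustrative sketch (not compiled): each CPU owns one blk_mq_ctx, and a
 * staged request goes on the rq_list matching its hardware queue type, e.g.:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      enum hctx_type type = blk_mq_get_hctx_type(rq->cmd_flags);
 *
 *      spin_lock(&ctx->lock);
 *      list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
 *      spin_unlock(&ctx->lock);
 *
 * index_hw[type] records this ctx's index within the ctx map of the hardware
 * queue it feeds.
 */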

enum {
        BLK_MQ_NO_TAG           = -1U,
        BLK_MQ_TAG_MIN          = 1,
        BLK_MQ_TAG_MAX          = BLK_MQ_NO_TAG - 1,
};

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD           ((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);

/*
 * Internal helpers for request insertion into sw queues
 */
void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type, cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller must ensure that polling is enabled on the queue if
         * REQ_POLLED is set.
         */
        if (opf & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
        return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags, type) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED)
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     blk_opf_t opf,
                                                     struct blk_mq_ctx *ctx)
{
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
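/*
 * Example (illustrative only): resolving the hardware queue for a polled
 * read on the current CPU combines the helpers above:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      struct blk_mq_hw_ctx *hctx =
 *              blk_mq_map_queue(q, REQ_OP_READ | REQ_POLLED, ctx);
 *
 * This selects the HCTX_TYPE_POLL mapping; per the comment in
 * blk_mq_get_hctx_type(), callers may only pass REQ_POLLED when polling is
 * enabled on the queue.
 */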

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctxs are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        blk_opf_t cmd_flags;
        req_flags_t rq_flags;

        /* allocate multiple requests/tags in one go */
        unsigned int nr_tags;
        struct request **cached_rq;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};
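/*
 * Sketch of a typical allocation flow (illustrative; it mirrors how the
 * internal allocator in blk-mq.c fills in this descriptor):
 *
 *      struct blk_mq_alloc_data data = {
 *              .q              = q,
 *              .flags          = BLK_MQ_REQ_NOWAIT,
 *              .cmd_flags      = opf,
 *      };
 *      unsigned int tag;
 *
 *      data.ctx = blk_mq_get_ctx(q);
 *      data.hctx = blk_mq_map_queue(q, data.cmd_flags, data.ctx);
 *      tag = blk_mq_get_tag(&data);    (BLK_MQ_NO_TAG on failure)
 *
 * nr_tags/cached_rq only come into play for plug-based batch allocation.
 */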

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
                unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
                unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
                unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
                                                 struct blk_mq_hw_ctx *hctx)
{
        if (!hctx)
                return &bt->ws[0];
        return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
                                          unsigned int tag)
{
        return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (!(data->rq_flags & RQF_ELV))
                return data->hctx->tags;
        return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (token < 0)
                return;

        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}
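/*
 * Typical budget handling during dispatch (illustrative sketch; the real
 * logic lives in blk-mq.c and blk-mq-sched.c):
 *
 *      int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *      if (budget_token < 0)
 *              return;         (no budget: back off and retry later)
 *      blk_mq_set_rq_budget_token(rq, budget_token);
 *      ...queue rq to the driver; on failure:
 *      blk_mq_put_dispatch_budget(q, budget_token);
 *
 * Drivers that don't implement ->get_budget() always get token 0 back.
 */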

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                                                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                __blk_mq_dec_active_requests(hctx);
        }
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        if (rq->tag != BLK_MQ_NO_TAG &&
            !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
                hctx->tags->rqs[rq->tag] = rq;
                return true;
        }

        return __blk_mq_get_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices,
 * this ordering change can cause write BIO failures with zoned block devices
 * as these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for
 * a zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
        /* Zoned block device write operation case: do not plug the BIO */
        if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
            bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
                return NULL;

        /*
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
        return current->plug;
}
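/*
 * Illustrative caller pattern (a sketch of how the submission path in
 * blk-mq.c consumes this helper; not an additional API):
 *
 *      struct blk_plug *plug = blk_mq_plug(bio);
 *
 *      if (plug)
 *              ...batch the request on the plug for a later flush...
 *      else
 *              ...insert/dispatch directly...
 *
 * A NULL return simply means the bio must take the unplugged path.
 */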

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = atomic_read(&hctx->tags->active_queues);

        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}
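/*
 * Worked example of the fair-share math above (numbers are illustrative):
 * with a shared bitmap depth of 256 and 8 active users,
 *
 *      depth = max((256 + 8 - 1) / 8, 4U) = max(32, 4) = 32
 *
 * so each queue may hold at most 32 in-flight requests; the 4U floor keeps
 * heavily shared small tag sets from starving any single user.
 */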

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)        \
do {                                                                   \
        if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {                 \
                struct blk_mq_tag_set *__tag_set = (q)->tag_set;       \
                int srcu_idx;                                          \
                                                                       \
                might_sleep_if(check_sleep);                           \
                srcu_idx = srcu_read_lock(__tag_set->srcu);            \
                (dispatch_ops);                                        \
                srcu_read_unlock(__tag_set->srcu, srcu_idx);           \
        } else {                                                       \
                rcu_read_lock();                                       \
                (dispatch_ops);                                        \
                rcu_read_unlock();                                     \
        }                                                              \
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)                       \
        __blk_mq_run_dispatch_ops(q, true, dispatch_ops)

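/*
 * Usage sketch (illustrative): callers wrap the dispatch work itself, e.g.
 *
 *      blk_mq_run_dispatch_ops(hctx->queue,
 *              blk_mq_sched_dispatch_requests(hctx));
 *
 * so the statement runs under rcu_read_lock() for non-blocking tag sets and
 * under srcu_read_lock() when BLK_MQ_F_BLOCKING is set.
 */
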
#endif