/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H

struct blk_mq_alloc_data;

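/*
 * Set-up and tear-down of the tag space: blk_mq_init_tags() allocates a
 * blk_mq_tags structure with @nr_tags tags (of which @reserved_tags come
 * from the reserved pool) on NUMA node @node using the given allocation
 * policy, and blk_mq_init_bitmaps() initialises the normal and reserved
 * sbitmap queues that back it.
 */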
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					unsigned int reserved_tags,
					int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			       struct sbitmap_queue *breserved_tags,
			       unsigned int queue_depth,
			       unsigned int reserved,
			       int node, int alloc_policy);

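/*
 * Tag allocation and release. blk_mq_get_tag() hands out a single tag for
 * the request described by @data, returning BLK_MQ_NO_TAG when nothing is
 * free and the caller cannot wait; blk_mq_get_tags() is the batched variant.
 * Rough single-tag flow (illustrative sketch only):
 *
 *	tag = blk_mq_get_tag(data);
 *	if (tag != BLK_MQ_NO_TAG) {
 *		... dispatch the request, and on completion ...
 *		blk_mq_put_tag(tags, ctx, tag);
 *	}
 *
 * The remaining helpers resize the tag depth at runtime, including the
 * shared and scheduler-shared tags.
 */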
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_tags **tags,
				   unsigned int depth, bool can_grow);
extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
					  unsigned int size);
extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

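/*
 * blk_mq_tag_wakeup_all() kicks every waiter on the tag wait queues. The
 * iterators below call @fn on requests tied to a tag map:
 * blk_mq_queue_tag_busy_iter() walks the in-flight requests of a queue,
 * blk_mq_all_tag_iter() walks every request in the given tag map.
 */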
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

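/*
 * Pick the sbitmap wait queue to sleep on when no tag is available: callers
 * without a hardware queue use the first wait queue, otherwise the choice
 * is spread round-robin via hctx->wait_index.
 */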
static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

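/*
 * BLK_MQ_NO_TAG is the sentinel returned when no tag could be allocated;
 * BLK_MQ_TAG_MIN and BLK_MQ_TAG_MAX bound the supported tag space.
 */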
enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

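/*
 * Mark a hardware queue as actively using its tags. This only has an effect
 * when the tags are shared between request queues (BLK_MQ_F_TAG_QUEUE_SHARED),
 * where the count of active users is used to divide the tag space fairly.
 */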
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return false;

	return __blk_mq_tag_busy(hctx);
}

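/*
 * Counterpart to blk_mq_tag_busy(): drop the active-user accounting once a
 * hardware queue with shared tags goes idle.
 */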
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return;

	__blk_mq_tag_idle(hctx);
}

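/*
 * Reserved tags occupy the low end of the tag space, so any tag below
 * tags->nr_reserved_tags came from the reserved pool.
 */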
static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

#endif