/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>     /* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT         (5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
        spinlock_t              mq_flush_lock;
        unsigned int            flush_pending_idx:1;
        unsigned int            flush_running_idx:1;
        blk_status_t            rq_status;
        unsigned long           flush_pending_since;
        struct list_head        flush_queue[2];
        unsigned long           flush_data_in_flight;
        struct request          *flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

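/*
 * Try to take a q_usage_counter reference without blocking.  This fails if
 * the queue is frozen, or while it is marked pm_only unless the caller passes
 * pm and the device is not fully suspended.
 */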
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
        rcu_read_lock();
        if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
                goto fail;

        /*
         * The code that increments the pm_only counter must ensure that the
         * counter is globally visible before the queue is unfrozen.
         */
        if (blk_queue_pm_only(q) &&
            (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
                goto fail_put;

        rcu_read_unlock();
        return true;

fail_put:
        blk_queue_exit(q);
fail:
        rcu_read_unlock();
        return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);

        if (blk_try_enter_queue(q, false))
                return 0;
        return __bio_queue_enter(q, bio);
}

static inline void blk_wait_io(struct completion *done)
{
        /* Prevent hang_check timer from firing at us during very long I/O */
        unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

        if (timeout)
                while (!wait_for_completion_io_timeout(done, timeout))
                        ;
        else
                wait_for_completion_io(done);
}

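/*
 * Number of bio_vecs allocated inline together with the bio itself, so that
 * small I/Os do not need a separate bvec array allocation.
 */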
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
                gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
                struct page *page, unsigned len, unsigned offset,
                bool *same_page);

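/*
 * Two adjacent bio_vecs may only be merged into one hardware segment if they
 * are physically contiguous and the combined range does not straddle the
 * queue's segment boundary mask.
 */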
static inline bool biovec_phys_mergeable(struct request_queue *q,
                struct bio_vec *vec1, struct bio_vec *vec2)
{
        unsigned long mask = queue_segment_boundary(q);
        phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
        phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

        /*
         * Merging adjacent physical pages may not work correctly under KMSAN
         * if their metadata pages aren't adjacent. Just disable merging.
         */
        if (IS_ENABLED(CONFIG_KMSAN))
                return false;

        if (addr1 + vec1->bv_len != addr2)
                return false;
        if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
                return false;
        if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
                return false;
        return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
                struct bio_vec *bprv, unsigned int offset)
{
        return (offset & lim->virt_boundary_mask) ||
                ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
                struct bio_vec *bprv, unsigned int offset)
{
        if (!lim->virt_boundary_mask)
                return false;
        return __bvec_gap_to_prev(lim, bprv, offset);
}

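/*
 * Requests that may never be merged: passthrough commands, flushes,
 * write-zeroes and zone-append operations, and anything carrying a no-merge
 * flag.
 */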
static inline bool rq_mergeable(struct request *rq)
{
        if (blk_rq_is_passthrough(rq))
                return false;

        if (req_op(rq) == REQ_OP_FLUSH)
                return false;

        if (req_op(rq) == REQ_OP_WRITE_ZEROES)
                return false;

        if (req_op(rq) == REQ_OP_ZONE_APPEND)
                return false;

        if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
                return false;
        if (rq->rq_flags & RQF_NOMERGE_FLAGS)
                return false;

        return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be a normal read/write request and the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
        if (req_op(req) == REQ_OP_DISCARD &&
            queue_max_discard_segments(req->q) > 1)
                return true;
        return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
        if (req_op(rq) == REQ_OP_DISCARD)
                return queue_max_discard_segments(rq->q);
        return queue_max_segments(rq->q);
}

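/*
 * Upper bound, in 512-byte sectors, on a single request of the given
 * operation type according to the queue limits.
 */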
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                enum req_op op)
{
        if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
                return min(q->limits.max_discard_sectors,
                           UINT_MAX >> SECTOR_SHIFT);

        if (unlikely(op == REQ_OP_WRITE_ZEROES))
                return q->limits.max_write_zeroes_sectors;

        return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
        if (bio_integrity(bio))
                return __bio_integrity_endio(bio);
        return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
                struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
                struct bio *next)
{
        struct bio_integrity_payload *bip = bio_integrity(req->bio);
        struct bio_integrity_payload *bip_next = bio_integrity(next);

        return bvec_gap_to_prev(&req->q->limits,
                                &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
                struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

        return bvec_gap_to_prev(&req->q->limits,
                                &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                struct request *r1, struct request *r2)
{
        return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                struct request *r, struct bio *b)
{
        return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
                struct bio *next)
{
        return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
                struct bio *bio)
{
        return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
        return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
                struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
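/*
 * A plugged task's request list is flushed early once it holds roughly
 * BLK_MAX_REQUEST_COUNT requests or the most recently queued request reaches
 * BLK_PLUG_FLUSH_SIZE bytes.
 */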
#define BLK_MAX_REQUEST_COUNT 32
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
                char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
                char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
                char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
                char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
                const char *, size_t);

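/*
 * Cheap check for whether a bio might have to be split to fit the queue
 * limits; the full splitting logic only runs when this returns true.
 */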
static inline bool bio_may_exceed_limits(struct bio *bio,
                const struct queue_limits *lim)
{
        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return true; /* non-trivial splitting decisions */
        default:
                break;
        }

        /*
         * All drivers must accept single-segment bios that are <= PAGE_SIZE.
         * This is a quick and dirty check that relies on the fact that
         * bi_io_vec[0] is always valid if a bio has data. The check might
         * lead to occasional false negatives when bios are cloned, but
         * compared to the performance impact of cloned bios themselves the
         * check doesn't matter anyway.
         */
        return lim->chunk_sectors || bio->bi_vcnt != 1 ||
                bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio,
                const struct queue_limits *lim,
                unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
                unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
        return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
        const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
        return IS_ENABLED(CONFIG_BOUNCE) &&
                q->limits.bounce == BLK_BOUNCE_HIGH &&
                max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
                struct request_queue *q)
{
        if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
                return __blk_queue_bounce(bio, q);
        return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
                unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
                unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
                unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
                blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE       0
#define ADDPART_FLAG_RAID       1
#define ADDPART_FLAG_WHOLEDISK  2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
                sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
                sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
                struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
                struct page *page, unsigned int len, unsigned int offset,
                unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
        if (bio_flagged(bio, BIO_PAGE_PINNED))
                unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
                loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
                unsigned int bytes)
{
        return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req) \
        ((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
        return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
        WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
        return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
        atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
        return atomic_read(&req->ref);
}

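/*
 * Timestamps taken while a plug is active are coarsened: the first call
 * caches ktime_get_ns() in the plug and subsequent calls reuse that value,
 * so batched submission avoids hammering the clock source.
 */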
static inline u64 blk_time_get_ns(void)
{
        struct blk_plug *plug = current->plug;

        if (!plug || !in_task())
                return ktime_get_ns();

        /*
         * 0 could very well be a valid time, but rather than flag "this is
         * a valid timestamp" separately, just accept that we'll do an extra
         * ktime_get_ns() if we just happen to get 0 as the current time.
         */
        if (!plug->cur_ktime) {
                plug->cur_ktime = ktime_get_ns();
                current->flags |= PF_BLOCK_TS;
        }
        return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
        return ns_to_ktime(blk_time_get_ns());
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
        (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
        return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
        return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
        return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
                sector_t size)
{
        size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
        issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
                        (blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
                        ((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
                const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

#endif /* BLK_INTERNAL_H */