// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
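
/*
 * Example (illustrative sketch, not part of the original file): a stacking
 * driver such as MD or DM typically resets its limits with
 * blk_set_stacking_limits() and then folds in the limits of every component
 * device.  "for_each_component_bdev" is a hypothetical iterator standing in
 * for the driver's own loop over its members:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	for_each_component_bdev(bdev)
 *		queue_limits_stack_bdev(&lim, bdev, 0, disk->disk_name);
 *	queue_limits_set(disk->queue, &lim);
 */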

static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors > max_hw_sectors ||
		    lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE). Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (lim->virt_boundary_mask) {
		if (WARN_ON_ONCE(lim->max_segment_size &&
				 lim->max_segment_size != UINT_MAX))
			return -EINVAL;
		lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = 0;
	}

	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
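
/*
 * Example (illustrative sketch): an atomic update of a single limit.  The
 * snapshot is obtained with queue_limits_start_update(), declared in
 * <linux/blkdev.h>, which takes q->limits_lock; committing it below
 * validates the result and drops the lock.  "new_discard_sectors" is a
 * made-up driver value and error handling is elided:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.max_hw_discard_sectors = new_discard_sectors;
 *	err = queue_limits_commit_update(q, &lim);
 */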

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
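
/*
 * Example (illustrative sketch): a driver applying a freshly built set of
 * limits at probe time; unset fields are given defaults by
 * blk_validate_limits().  The values shown are made up:
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 2048,
 *	};
 *
 *	err = queue_limits_set(q, &lim);
 */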

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
	q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
	}

	max_hw_sectors = round_down(max_hw_sectors,
				    limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_hw_sectors = max_hw_sectors;

	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

	if (limits->max_user_sectors)
		max_sectors = min(max_sectors, limits->max_user_sectors);
	else
		max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

	max_sectors = round_down(max_sectors,
				 limits->logical_block_size >> SECTOR_SHIFT);
	limits->max_sectors = max_sectors;

	if (!q->disk)
		return;
	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
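
/*
 * Example (illustrative sketch): a driver whose controller can move at most
 * 1 MiB per request would cap the hardware limit at 2048 512-byte sectors;
 * the soft max_sectors limit stays user-adjustable below that ceiling via
 * /sys/block/<device>/queue/max_sectors_kb:
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */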

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q:  the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);

/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);

/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	/*
	 * Signal eventual driver bugs resulting in the max_zone_append sectors limit
	 * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
	 * or the max_hw_sectors limit not set.
	 */
	WARN_ON(!max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
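
/*
 * Example (illustrative sketch): a zoned driver marks the disk as zoned,
 * advertises the zone size as chunk_sectors and then caps zone append
 * writes to a full zone.  "zone_sectors" is a hypothetical per-device value:
 *
 *	disk_set_zoned(disk);
 *	blk_queue_chunk_sectors(q, zone_sectors);
 *	blk_queue_max_zone_append_sectors(q, zone_sectors);
 */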

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		pr_info("%s: set to minimum %u\n", __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		pr_info("%s: set to minimum %u\n", __func__, max_size);
	}

	/* see blk_queue_virt_boundary() for the explanation */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
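
/*
 * Example (illustrative sketch): a 4K-native device advertises matching
 * logical and physical block sizes:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 */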

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
	if (!q->disk)
		return;
	q->disk->bdi->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
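
/*
 * Example (illustrative sketch): a RAID-style device with a 64 KiB stripe
 * chunk across eight data disks might report the chunk as the minimum and
 * the full stripe as the optimal I/O size:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 8 * 64 * 1024);
 */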

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					b->max_zone_append_sectors);
	t->bounce = max(t->bounce, b->bounce);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					    b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	t->zoned = max(t->zoned, b->zoned);
	if (!t->zoned) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
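
/*
 * Example (illustrative sketch): combining the limits of two component
 * queues by hand.  "bdev1" and "bdev2" are hypothetical component devices;
 * most callers use queue_limits_stack_bdev() below, which also accounts for
 * the partition start offset:
 *
 *	struct queue_limits t;
 *
 *	blk_set_stacking_limits(&t);
 *	if (blk_stack_limits(&t, &bdev_get_queue(bdev1)->limits, 0) ||
 *	    blk_stack_limits(&t, &bdev_get_queue(bdev2)->limits, 0))
 *		pr_warn("stacked device is misaligned\n");
 */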

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		pr_info("%s: set to minimum %lx\n", __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support scatter/gather
	 * I/O natively, but instead require a descriptor list entry for each
	 * page (which might not be identical to the Linux PAGE_SIZE). Because
	 * of that they are not limited by our notion of "segment size".
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->limits.dma_alignment)
		q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
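
/*
 * Example (illustrative sketch): a driver would mirror what the hardware
 * reports, e.g. a volatile write cache with FUA support:
 *
 *	blk_queue_write_cache(q, true, true);
 *
 * whereas a write-through device would pass false for both arguments.
 */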

/**
 * blk_queue_required_elevator_features - set a queue's required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
					  unsigned int features)
{
	q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);

/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer about merging the segments by dma map of @q.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
				       struct device *dev)
{
	unsigned long boundary = dma_get_merge_boundary(dev);

	if (!boundary)
		return false;

	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
	blk_queue_virt_boundary(q, boundary);

	return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
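
/*
 * Example (illustrative sketch): a driver whose IOMMU can merge mappings may
 * allow more segments than the hardware natively supports when this helper
 * succeeds.  "MY_MERGE_SEGMENTS" and "hw_max_segs" are hypothetical driver
 * values:
 *
 *	if (blk_queue_can_use_dma_map_merging(q, dev))
 *		blk_queue_max_segments(q, MY_MERGE_SEGMENTS);
 *	else
 *		blk_queue_max_segments(q, hw_max_segs);
 */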

/**
 * disk_set_zoned - indicate a zoned device
 * @disk:	gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default. The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);