// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity
	 * if the bio needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the
	 * bio at discard granularity boundaries easier in the driver if
	 * needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
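
/*
 * Worked example (illustrative numbers only): with a 1 MiB discard
 * granularity (2048 sectors) and a discard starting at sector 2050, the
 * first bio is limited to round_up(2050, 2048) - 2050 = 2046 sectors, so
 * the next bio starts at the granularity-aligned sector 4096.  Once
 * aligned, each subsequent bio is capped at round_down(UINT_MAX,
 * granularity) bytes (just under 4 GiB) worth of sectors.
 */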

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
				   bdev);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects =
			min(nr_sects, bio_discard_limit(bdev, sector));

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
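
/*
 * Minimal usage sketch (hypothetical caller, not part of the block layer
 * API): discard an 8 MiB range starting 1 MiB into the device.  The range
 * is given in 512-byte sectors and must be aligned to the logical block
 * size, otherwise the helper returns -EINVAL.
 */
static inline int example_discard_range(struct block_device *bdev)
{
	sector_t start = (1 * 1024 * 1024) >> SECTOR_SHIFT;	/* 1 MiB */
	sector_t count = (8 * 1024 * 1024) >> SECTOR_SHIFT;	/* 8 MiB */

	return blkdev_issue_discard(bdev, start, count, GFP_KERNEL);
}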

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
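
/*
 * Worked example, assuming 4 KiB pages (illustrative numbers only): 7
 * sectors round up to 1 page, 1024 sectors (512 KiB) convert to 128 pages,
 * and any larger range is capped at BIO_MAX_VECS pages.
 */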

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue zeroout against
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
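
/*
 * Minimal usage sketch (hypothetical caller): insist on a real zeroing
 * offload first, then fall back to explicit zero writes only if the caller
 * chooses to.  BLKDEV_ZERO_NOFALLBACK makes the first call fail with
 * -EOPNOTSUPP instead of silently submitting zero pages.
 */
static inline int example_zeroout_offload_first(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sects)
{
	int ret;

	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				   BLKDEV_ZERO_NOFALLBACK);
	if (ret == -EOPNOTSUPP)
		ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					   GFP_KERNEL, 0);
	return ret;
}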

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		if (!nr_sects) {
			ret = submit_bio_wait(bio);
			bio_put(bio);
			break;
		}
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
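
/*
 * Minimal usage sketch (hypothetical caller): securely erase a whole block
 * device, checking first that the device actually advertises the
 * capability.  bdev_max_secure_erase_sectors() returning 0 means the
 * helper would fail with -EOPNOTSUPP anyway.
 */
static inline int example_secure_erase_all(struct block_device *bdev)
{
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}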