// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}
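
/*
 * Worked example (illustrative, not from the original source): zone sizes
 * are always a power of two, so with zone_sectors = 524288 (0x80000),
 * sector 0x90000 belongs to the zone starting at
 * 0x90000 & ~0x7ffff = 0x80000.
 */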

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
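
/*
 * Usage sketch (illustrative, not code from this file): an I/O scheduler
 * dispatch path would normally use the blk_req_zone_write_lock() and
 * blk_req_zone_write_unlock() wrappers from include/linux/blkdev.h, which
 * check blk_req_needs_zone_write_lock() and the RQF_ZONE_WRITE_LOCKED flag
 * before touching the bitmap:
 *
 *	blk_req_zone_write_lock(rq);
 *	... dispatch rq, at most one write in flight per sequential zone ...
 *	blk_req_zone_write_unlock(rq);
 */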

static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	sector_t zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}
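
/*
 * Worked example (illustrative, not from the original source): the shift
 * by ilog2(zone_sectors) is a round-up division. With 524288-sector
 * (256 MiB) zones, a 1953125000-sector (1 TB) disk has
 * (1953125000 + 524287) >> 19 = 3726 zones, the last one smaller than
 * the others.
 */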

/**
 * blkdev_nr_zones - Get number of zones
 * @bdev: Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk));
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);

/**
 * blkdev_report_zones - Get zone information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones zones, and call @cb for each zone reported by the device.
 *    To report all zones of a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
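
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * report_zones_cb that counts the write-pointer zones of a device. The
 * function and variable names below are hypothetical. A caller would
 * invoke it as:
 *
 *	unsigned int nr_seq = 0;
 *
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				  example_count_seq_zones_cb, &nr_seq);
 */
static int __maybe_unused example_count_seq_zones_cb(struct blk_zone *zone,
						     unsigned int idx,
						     void *data)
{
	unsigned int *nr_seq = data;

	/* Conventional zones have no write pointer */
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
		(*nr_seq)++;
	return 0;
}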
| 135 | |
Chaitanya Kulkarni | 6e33dbf | 2019-08-01 10:26:36 -0700 | [diff] [blame] | 136 | static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev, |
Damien Le Moal | c7a1d92 | 2019-10-27 23:05:43 +0900 | [diff] [blame] | 137 | sector_t sector, |
Chaitanya Kulkarni | 6e33dbf | 2019-08-01 10:26:36 -0700 | [diff] [blame] | 138 | sector_t nr_sectors) |
| 139 | { |
| 140 | if (!blk_queue_zone_resetall(bdev_get_queue(bdev))) |
| 141 | return false; |
| 142 | |
Chaitanya Kulkarni | 6e33dbf | 2019-08-01 10:26:36 -0700 | [diff] [blame] | 143 | /* |
Damien Le Moal | 5eac3eb | 2019-11-11 11:39:25 +0900 | [diff] [blame] | 144 | * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors |
| 145 | * of the applicable zone range is the entire disk. |
Chaitanya Kulkarni | 6e33dbf | 2019-08-01 10:26:36 -0700 | [diff] [blame] | 146 | */ |
Damien Le Moal | 5eac3eb | 2019-11-11 11:39:25 +0900 | [diff] [blame] | 147 | return !sector && nr_sectors == get_capacity(bdev->bd_disk); |
Chaitanya Kulkarni | 6e33dbf | 2019-08-01 10:26:36 -0700 | [diff] [blame] | 148 | } |

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors. Must be at least the length of one zone
 *		and must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (!nr_sectors || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones. This is useful for applications such as mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = op;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
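
/*
 * Usage sketch (illustrative only, not part of this file): resetting the
 * single zone containing @sector. The helper name is hypothetical; a real
 * caller must hold a reference on the block device, and this sketch
 * assumes @sector does not fall inside a smaller last zone.
 */
static int __maybe_unused example_reset_one_zone(struct block_device *bdev,
						 sector_t sector)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
				blk_zone_start(q, sector),
				blk_queue_zone_sectors(q), GFP_KERNEL);
}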

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

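/*
 * Userspace view (illustrative sketch, not kernel code): the ioctl
 * argument is a struct blk_zone_report immediately followed by room for
 * nr_zones struct blk_zone entries:
 *
 *	struct blk_zone_report *rep =
 *		malloc(sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	ret = ioctl(fd, BLKREPORTZONE, rep);
 *
 * On success, rep->nr_zones is updated to the number of zones actually
 * reported.
 */
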
/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
				GFP_KERNEL);
}
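
/*
 * Userspace view (illustrative sketch, not kernel code): resetting the
 * first zone of a device with 524288-sector zones:
 *
 *	struct blk_zone_range zrange = {
 *		.sector = 0,
 *		.nr_sectors = 524288,
 *	};
 *
 *	ret = ioctl(fd, BLKRESETZONE, &zrange);
 */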

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*seq_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the possible exception of
	 * a smaller last zone.
	 */
	if (zone->start + zone_sectors < capacity &&
	    zone->len != zone_sectors) {
		pr_warn("%s: Invalid zoned device with non constant zone size\n",
			disk->disk_name);
		return -ENODEV;
	}

	if (zone->start + zone->len >= capacity &&
	    zone->len > zone_sectors) {
		pr_warn("%s: Invalid zoned device with larger last zone size\n",
			disk->disk_name);
		return -ENODEV;
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
		set_bit(idx, args->seq_zones_bitmap);

	args->sector += zone->len;
	return 0;
}

static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones,
				struct blk_revalidate_zone_args *args)
{
	/*
	 * Ensure that all memory allocations in this context are done as
	 * if GFP_NOIO was specified.
	 */
	unsigned int noio_flag = memalloc_noio_save();
	struct request_queue *q = disk->queue;
	int ret;

	args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!args->seq_zones_wlock) {
		ret = -ENOMEM;
		goto out_restore_noio;
	}
	args->seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!args->seq_zones_bitmap) {
		ret = -ENOMEM;
		goto out_restore_noio;
	}

	ret = disk->fops->report_zones(disk, 0, nr_zones,
				       blk_revalidate_zone_cb, args);
out_restore_noio:
	/* Restore the allocation context on all return paths */
	memalloc_noio_restore(noio_flag);
	return ret;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method. For BIO based queues, no zone bitmap
 * is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	struct blk_revalidate_zone_args args = { .disk = disk };
	int ret = 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	if (nr_zones)
		ret = blk_update_zone_info(disk, nr_zones, &args);

	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret >= 0) {
		q->nr_zones = nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->seq_zones_bitmap, args.seq_zones_bitmap);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.seq_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);

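/*
 * Usage sketch (illustrative, not code from this file): a blk-mq based
 * zoned driver would typically call this after (re)reading the device
 * geometry, e.g.:
 *
 *	if (blk_queue_is_zoned(disk->queue)) {
 *		ret = blk_revalidate_disk_zones(disk);
 *		if (ret)
 *			return ret;
 *	}
 */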