// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert BLK_ZONE_COND_XXX
 * into its string format. Useful for debugging and tracing zone conditions.
 * For an invalid BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
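
/*
 * Illustrative sketch (not part of the original file): a zone report
 * callback could use blk_zone_cond_str() when tracing zones. The helper
 * name and the pr_debug() format below are hypothetical.
 */
static inline void example_trace_zone(const char *name, struct blk_zone *zone)
{
	pr_debug("%s: zone at sector %llu, len %llu, cond %s\n",
		 name, (unsigned long long)zone->start,
		 (unsigned long long)zone->len,
		 blk_zone_cond_str(zone->cond));
}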

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
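
/*
 * Illustrative sketch (not part of the original file): how an I/O
 * scheduler dispatch path typically pairs these helpers, taking the
 * per-zone write lock before dispatching a sequential zone write and
 * releasing it at request completion (the in-tree mq-deadline scheduler
 * serializes zone writes this way). The function names below are
 * hypothetical; blk_req_zone_write_unlock() is the blkdev.h wrapper
 * around __blk_req_zone_write_unlock().
 */
static bool __maybe_unused example_try_dispatch_write(struct request *rq)
{
	/* Reads and non-sequential-zone writes need no serialization. */
	if (!blk_req_needs_zone_write_lock(rq))
		return true;

	/* Only one write may be in flight per sequential zone. */
	return blk_req_zone_write_trylock(rq);
}

static void __maybe_unused example_complete_write(struct request *rq)
{
	/* No-op unless this request actually holds its zone's write lock. */
	blk_req_zone_write_unlock(rq);
}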

/**
 * blkdev_nr_zones - Get number of zones
 * @disk: Target gendisk
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
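
/*
 * Illustrative sketch (not part of the original file): the same
 * power-of-two arithmetic gives the number of the zone containing an
 * arbitrary sector. Zone sizes are validated to be a power of two (see
 * blk_revalidate_zone_cb() below), so division reduces to a shift. The
 * helper name is hypothetical; the in-tree blk_queue_zone_no() helper in
 * blkdev.h provides this for a request queue.
 */
static inline unsigned int example_zone_no(struct gendisk *disk,
					   sector_t sector)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(zone_sectors);
}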

/**
 * blkdev_report_zones - Get zone information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
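
/*
 * Illustrative sketch (not part of the original file): counting the
 * non-empty zones of a device with a report callback. The function names
 * and the "used" semantics are hypothetical; per the note above, a caller
 * wanting to constrain allocations would wrap this in
 * memalloc_noio_save/restore().
 */
static int __maybe_unused example_used_zone_cb(struct blk_zone *zone,
					       unsigned int idx, void *data)
{
	unsigned int *used = data;

	if (zone->cond != BLK_ZONE_COND_EMPTY)
		(*used)++;
	return 0;
}

static int __maybe_unused example_count_used_zones(struct block_device *bdev,
						   unsigned int *used)
{
	int ret;

	*used = 0;
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  example_used_zone_cb, used);
	return ret < 0 ? ret : 0;
}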

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the sector range to
	 * operate on covers the entire disk.
	 */
	return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possible smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones, which is useful for applications like mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
			break;
		}

		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
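
/*
 * Illustrative sketch (not part of the original file): finishing a single
 * zone, e.g. to move its write pointer to the end before treating the zone
 * as read-mostly. The wrapper name is hypothetical; @sector must be
 * zone-size aligned per the checks above.
 */
static int __maybe_unused example_finish_one_zone(struct block_device *bdev,
						  sector_t sector)
{
	sector_t zone_sectors = blk_queue_zone_sectors(bdev_get_queue(bdev));

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_FINISH, sector,
				zone_sectors, GFP_KERNEL);
}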

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
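
/*
 * Illustrative user-space sketch (not kernel code, kept in a comment so the
 * file still compiles): the caller allocates a struct blk_zone_report with
 * rep.nr_zones blk_zone entries immediately after it, mirroring the
 * args.zones layout computed above.
 *
 *	struct blk_zone_report *rep;
 *	size_t bufsz = sizeof(*rep) + 16 * sizeof(struct blk_zone);
 *
 *	rep = calloc(1, bufsz);
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (ioctl(fd, BLKREPORTZONE, rep) == 0)
 *		printf("%u zones reported\n", rep->nr_zones);
 */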

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			return ret;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

	/*
	 * Invalidate the page cache again for zone reset: writes can only be
	 * direct for zoned devices so concurrent writes would not add any page
	 * to the page cache after/during reset. The page cache may be filled
	 * again due to concurrent reads though and dropping the pages for
	 * these is fine.
	 */
	if (!ret && cmd == BLKRESETZONE)
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);

	return ret;
}

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of a
	 * possibly smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 * @update_driver_data: Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers. For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
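
/*
 * Illustrative sketch (not part of the original file): a blk-mq zoned
 * driver revalidating zones after a capacity or format change. Both
 * function names are hypothetical; the callback runs with the queue
 * frozen, so it can safely resize per-zone driver state against the
 * freshly updated q->nr_zones.
 */
static void __maybe_unused example_update_driver_data(struct gendisk *disk)
{
	/* e.g. reallocate a per-zone write pointer array of q->nr_zones */
}

static int __maybe_unused example_driver_revalidate(struct gendisk *disk)
{
	return blk_revalidate_disk_zones(disk, example_update_driver_data);
}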

void blk_queue_clear_zone_settings(struct request_queue *q)
{
	blk_mq_freeze_queue(q);

	blk_queue_free_zone_bitmaps(q);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	q->nr_zones = 0;
	q->max_open_zones = 0;
	q->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}