blob: 635ce0648133dd28932ff5bab9447cecbf4af6ca [file] [log] [blame]
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02001// SPDX-License-Identifier: GPL-2.0
2#include <linux/vmalloc.h>
Damien Le Moalaa1c09cb62020-10-29 20:05:00 +09003#include <linux/bitmap.h>
Matias Bjørlingca4b2a02018-07-06 19:38:39 +02004#include "null_blk.h"
5
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -07006#define CREATE_TRACE_POINTS
Damien Le Moaleebf34a2020-11-20 10:55:19 +09007#include "trace.h"
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -07008
Damien Le Moaldb060f52022-04-20 09:57:17 +09009#undef pr_fmt
10#define pr_fmt(fmt) "null_blk: " fmt
11
Damien Le Moalcd92cdb2021-01-29 23:47:25 +090012static inline sector_t mb_to_sects(unsigned long mb)
13{
14 return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
15}
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020016
17static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
18{
19 return sect >> ilog2(dev->zone_size_sects);
20}
21
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +090022static inline void null_lock_zone_res(struct nullb_device *dev)
23{
24 if (dev->need_zone_res_mgmt)
25 spin_lock_irq(&dev->zone_res_lock);
26}
27
28static inline void null_unlock_zone_res(struct nullb_device *dev)
29{
30 if (dev->need_zone_res_mgmt)
31 spin_unlock_irq(&dev->zone_res_lock);
32}
33
34static inline void null_init_zone_lock(struct nullb_device *dev,
35 struct nullb_zone *zone)
36{
37 if (!dev->memory_backed)
38 spin_lock_init(&zone->spinlock);
39 else
40 mutex_init(&zone->mutex);
41}
42
43static inline void null_lock_zone(struct nullb_device *dev,
44 struct nullb_zone *zone)
45{
46 if (!dev->memory_backed)
47 spin_lock_irq(&zone->spinlock);
48 else
49 mutex_lock(&zone->mutex);
50}
51
52static inline void null_unlock_zone(struct nullb_device *dev,
53 struct nullb_zone *zone)
54{
55 if (!dev->memory_backed)
56 spin_unlock_irq(&zone->spinlock);
57 else
58 mutex_unlock(&zone->mutex);
59}
60
Damien Le Moald205bde2020-04-23 12:02:38 +090061int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020062{
Damien Le Moal0ebcdd72020-11-20 10:55:11 +090063 sector_t dev_capacity_sects, zone_capacity_sects;
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +090064 struct nullb_zone *zone;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020065 sector_t sector = 0;
66 unsigned int i;
67
68 if (!is_power_of_2(dev->zone_size)) {
André Almeida9c7eddf2019-09-16 11:07:59 -030069 pr_err("zone_size must be power-of-two\n");
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020070 return -EINVAL;
71 }
Chaitanya Kulkarnie2748322020-05-20 16:01:51 -070072 if (dev->zone_size > dev->size) {
73 pr_err("Zone size larger than device capacity\n");
74 return -EINVAL;
75 }
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020076
Aravind Ramesh089565f2020-06-29 12:06:38 -070077 if (!dev->zone_capacity)
78 dev->zone_capacity = dev->zone_size;
79
80 if (dev->zone_capacity > dev->zone_size) {
Damien Le Moaldb060f52022-04-20 09:57:17 +090081 pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
82 dev->zone_capacity, dev->zone_size);
Aravind Ramesh089565f2020-06-29 12:06:38 -070083 return -EINVAL;
84 }
85
Damien Le Moalcd92cdb2021-01-29 23:47:25 +090086 zone_capacity_sects = mb_to_sects(dev->zone_capacity);
87 dev_capacity_sects = mb_to_sects(dev->size);
88 dev->zone_size_sects = mb_to_sects(dev->zone_size);
89 dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
90 >> ilog2(dev->zone_size_sects);
Damien Le Moal0ebcdd72020-11-20 10:55:11 +090091
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +090092 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
93 GFP_KERNEL | __GFP_ZERO);
Matias Bjørlingca4b2a02018-07-06 19:38:39 +020094 if (!dev->zones)
95 return -ENOMEM;
96
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +090097 spin_lock_init(&dev->zone_res_lock);
Damien Le Moalaa1c09cb62020-10-29 20:05:00 +090098
Masato Suzukiea2c18e2018-10-30 16:14:05 +090099 if (dev->zone_nr_conv >= dev->nr_zones) {
100 dev->zone_nr_conv = dev->nr_zones - 1;
André Almeida9c7eddf2019-09-16 11:07:59 -0300101 pr_info("changed the number of conventional zones to %u",
Masato Suzukiea2c18e2018-10-30 16:14:05 +0900102 dev->zone_nr_conv);
103 }
104
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200105 /* Max active zones has to be < nbr of seq zones in order to be enforceable */
106 if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
107 dev->zone_max_active = 0;
108 pr_info("zone_max_active limit disabled, limit >= zone count\n");
109 }
110
111 /* Max open zones has to be <= max active zones */
112 if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
113 dev->zone_max_open = dev->zone_max_active;
114 pr_info("changed the maximum number of open zones to %u\n",
115 dev->nr_zones);
116 } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
117 dev->zone_max_open = 0;
118 pr_info("zone_max_open limit disabled, limit >= zone count\n");
119 }
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900120 dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
Damien Le Moal2e8c6e02020-11-20 10:55:15 +0900121 dev->imp_close_zone_no = dev->zone_nr_conv;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200122
Masato Suzukiea2c18e2018-10-30 16:14:05 +0900123 for (i = 0; i < dev->zone_nr_conv; i++) {
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900124 zone = &dev->zones[i];
Masato Suzukiea2c18e2018-10-30 16:14:05 +0900125
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900126 null_init_zone_lock(dev, zone);
Masato Suzukiea2c18e2018-10-30 16:14:05 +0900127 zone->start = sector;
128 zone->len = dev->zone_size_sects;
Matias Bjørling82394db2020-06-29 12:06:37 -0700129 zone->capacity = zone->len;
Masato Suzukiea2c18e2018-10-30 16:14:05 +0900130 zone->wp = zone->start + zone->len;
131 zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
132 zone->cond = BLK_ZONE_COND_NOT_WP;
133
134 sector += dev->zone_size_sects;
135 }
136
137 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900138 zone = &dev->zones[i];
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200139
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900140 null_init_zone_lock(dev, zone);
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200141 zone->start = zone->wp = sector;
Damien Le Moal0ebcdd72020-11-20 10:55:11 +0900142 if (zone->start + dev->zone_size_sects > dev_capacity_sects)
143 zone->len = dev_capacity_sects - zone->start;
144 else
145 zone->len = dev->zone_size_sects;
146 zone->capacity =
147 min_t(sector_t, zone->len, zone_capacity_sects);
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200148 zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
149 zone->cond = BLK_ZONE_COND_EMPTY;
150
151 sector += dev->zone_size_sects;
152 }
153
154 return 0;
155}
156
/*
 * Register @nullb as a host-managed zoned block device: set the zoned
 * model, request-queue flags and zone limits, and make the block layer
 * aware of the zone geometry. Returns 0 on success or a negative error
 * code from zone revalidation.
 */
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	disk_set_zoned(nullb->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

	if (queue_is_mq(q)) {
		/* blk-mq: let the block layer revalidate the zones. */
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		/* BIO mode: set up the zone geometry by hand. */
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
	disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

	return 0;
}
182
/* Free the zone array allocated by null_init_zoned_dev(). */
void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	/* Clear the pointer so a later call or access cannot double free. */
	dev->zones = NULL;
}
188
Christoph Hellwig7fc8fb52019-11-11 11:39:27 +0900189int null_report_zones(struct gendisk *disk, sector_t sector,
Christoph Hellwigd4100352019-11-11 11:39:30 +0900190 unsigned int nr_zones, report_zones_cb cb, void *data)
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200191{
Christoph Hellwige76239a2018-10-12 19:08:49 +0900192 struct nullb *nullb = disk->private_data;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200193 struct nullb_device *dev = nullb->dev;
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900194 unsigned int first_zone, i;
195 struct nullb_zone *zone;
196 struct blk_zone blkz;
Christoph Hellwigd4100352019-11-11 11:39:30 +0900197 int error;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200198
Christoph Hellwigd4100352019-11-11 11:39:30 +0900199 first_zone = null_zone_no(dev, sector);
200 if (first_zone >= dev->nr_zones)
201 return 0;
202
203 nr_zones = min(nr_zones, dev->nr_zones - first_zone);
Chaitanya Kulkarni766c3292020-03-25 10:49:56 -0700204 trace_nullb_report_zones(nullb, nr_zones);
205
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900206 memset(&blkz, 0, sizeof(struct blk_zone));
207 zone = &dev->zones[first_zone];
208 for (i = 0; i < nr_zones; i++, zone++) {
Christoph Hellwigd4100352019-11-11 11:39:30 +0900209 /*
210 * Stacked DM target drivers will remap the zone information by
211 * modifying the zone information passed to the report callback.
212 * So use a local copy to avoid corruption of the device zone
213 * array.
214 */
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900215 null_lock_zone(dev, zone);
216 blkz.start = zone->start;
217 blkz.len = zone->len;
218 blkz.wp = zone->wp;
219 blkz.type = zone->type;
220 blkz.cond = zone->cond;
221 blkz.capacity = zone->capacity;
222 null_unlock_zone(dev, zone);
Kanchan Joshi35bc10b2020-09-28 15:25:49 +0530223
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900224 error = cb(&blkz, i, data);
Christoph Hellwigd4100352019-11-11 11:39:30 +0900225 if (error)
226 return error;
Christoph Hellwige76239a2018-10-12 19:08:49 +0900227 }
228
Christoph Hellwigd4100352019-11-11 11:39:30 +0900229 return nr_zones;
Matias Bjørlingca4b2a02018-07-06 19:38:39 +0200230}
231
Damien Le Moalaa1c09cb62020-10-29 20:05:00 +0900232/*
233 * This is called in the case of memory backing from null_process_cmd()
234 * with the target zone already locked.
235 */
Ajay Joshidd85b492019-10-17 14:19:43 -0700236size_t null_zone_valid_read_len(struct nullb *nullb,
237 sector_t sector, unsigned int len)
238{
239 struct nullb_device *dev = nullb->dev;
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900240 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
Ajay Joshidd85b492019-10-17 14:19:43 -0700241 unsigned int nr_sectors = len >> SECTOR_SHIFT;
242
243 /* Read must be below the write pointer position */
244 if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
245 sector + nr_sectors <= zone->wp)
246 return len;
247
248 if (sector > zone->wp)
249 return 0;
250
251 return (zone->wp - sector) << SECTOR_SHIFT;
252}
253
/*
 * Close @zone: decrement the matching open zone counter and set the new
 * zone condition. Closing an already closed zone is a no-op; closing an
 * empty or full zone is an error. Called with the zone resource lock held
 * when zone resource management is enabled.
 */
static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	/* A zone with nothing written reverts to empty instead of closed. */
	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
282
/*
 * Close one implicitly open zone to free up an open zone resource.
 * Sequential zones are scanned in round-robin order, starting from the
 * zone after the last one that was implicitly closed (imp_close_zone_no).
 * Called with the zone resource lock held.
 */
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	/* Visit each sequential zone at most once. */
	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv; /* wrap to the first seq zone */

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}
305
Keith Buschfd788742020-10-22 08:47:39 -0700306static blk_status_t null_check_active(struct nullb_device *dev)
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200307{
308 if (!dev->zone_max_active)
Keith Buschfd788742020-10-22 08:47:39 -0700309 return BLK_STS_OK;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200310
Keith Buschfd788742020-10-22 08:47:39 -0700311 if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
312 dev->nr_zones_closed < dev->zone_max_active)
313 return BLK_STS_OK;
314
315 return BLK_STS_ZONE_ACTIVE_RESOURCE;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200316}
317
Keith Buschfd788742020-10-22 08:47:39 -0700318static blk_status_t null_check_open(struct nullb_device *dev)
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200319{
320 if (!dev->zone_max_open)
Keith Buschfd788742020-10-22 08:47:39 -0700321 return BLK_STS_OK;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200322
323 if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
Keith Buschfd788742020-10-22 08:47:39 -0700324 return BLK_STS_OK;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200325
Keith Buschfd788742020-10-22 08:47:39 -0700326 if (dev->nr_zones_imp_open) {
327 if (null_check_active(dev) == BLK_STS_OK) {
Damien Le Moal2e8c6e02020-11-20 10:55:15 +0900328 null_close_imp_open_zone(dev);
Keith Buschfd788742020-10-22 08:47:39 -0700329 return BLK_STS_OK;
330 }
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200331 }
332
Keith Buschfd788742020-10-22 08:47:39 -0700333 return BLK_STS_ZONE_OPEN_RESOURCE;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200334}
335
336/*
337 * This function matches the manage open zone resources function in the ZBC standard,
338 * with the addition of max active zones support (added in the ZNS standard).
339 *
340 * The function determines if a zone can transition to implicit open or explicit open,
341 * while maintaining the max open zone (and max active zone) limit(s). It may close an
342 * implicit open zone in order to make additional zone resources available.
343 *
344 * ZBC states that an implicit open zone shall be closed only if there is not
345 * room within the open limit. However, with the addition of an active limit,
346 * it is not certain that closing an implicit open zone will allow a new zone
347 * to be opened, since we might already be at the active limit capacity.
348 */
Damien Le Moal2b8b7ed2020-11-20 10:55:14 +0900349static blk_status_t null_check_zone_resources(struct nullb_device *dev,
350 struct nullb_zone *zone)
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200351{
Keith Buschfd788742020-10-22 08:47:39 -0700352 blk_status_t ret;
353
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200354 switch (zone->cond) {
355 case BLK_ZONE_COND_EMPTY:
Keith Buschfd788742020-10-22 08:47:39 -0700356 ret = null_check_active(dev);
357 if (ret != BLK_STS_OK)
358 return ret;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200359 fallthrough;
360 case BLK_ZONE_COND_CLOSED:
Keith Buschfd788742020-10-22 08:47:39 -0700361 return null_check_open(dev);
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200362 default:
363 /* Should never be called for other states */
364 WARN_ON(1);
Keith Buschfd788742020-10-22 08:47:39 -0700365 return BLK_STS_IOERR;
Niklas Casseldc4d137e2020-08-28 12:54:00 +0200366 }
367}
368
/*
 * Process a regular write or a zone append (@append true) of @nr_sectors
 * starting at @sector. Handles the zone condition transitions and the
 * open/active zone resource accounting, then forwards the actual data
 * transfer to null_process_cmd(). Returns a blk_status_t result.
 */
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	/* Conventional zones have no write pointer: append is invalid. */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		/* Cannot write to the zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (dev->queue_mode == NULL_Q_MQ)
			cmd->rq->__sector = sector;
		else
			cmd->bio->bi_iter.bi_sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/* The write must not cross the zone capacity. */
	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Writing to a closed or empty zone implicitly opens it: check the
	 * zone resource limits and update the accounting.
	 */
	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	/* A zone written up to its capacity becomes full. */
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}
461
/*
 * Explicitly open @zone. Opening an already explicitly open zone is a
 * no-op; opening a conventional or full zone fails. The zone resource
 * limits are checked before transitioning out of the empty or closed
 * condition. Called with the zone locked.
 */
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		/* The zone turns from implicitly to explicitly open. */
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}
504
/*
 * Close @zone: locked wrapper around __null_close_zone(). Closing a
 * conventional zone fails. Called with the zone locked.
 */
static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}
519
/*
 * Transition @zone to the full condition, moving the write pointer to the
 * end of the zone and releasing any open/active zone resource it held.
 * Finishing an already full zone is a no-op; finishing a conventional zone
 * fails. Called with the zone locked.
 */
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}
564
/*
 * Reset @zone to the empty condition: rewind the write pointer to the zone
 * start and release any open/active zone resource the zone held. For
 * memory backed devices, the zone data is also discarded. Resetting an
 * already empty zone is a no-op; resetting a conventional zone fails.
 * Called with the zone locked.
 */
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	/* Drop the backing pages of the reset zone. */
	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}
604
/*
 * Execute the zone management operation @op on the zone containing
 * @sector, or on all sequential zones for REQ_OP_ZONE_RESET_ALL.
 * Read-only and offline zones reject all management operations.
 */
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		/*
		 * Reset every sequential zone, skipping zones that are
		 * already empty, read-only or offline.
		 */
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}
666
/*
 * Zoned device command entry point: dispatch writes and zone append to
 * null_zone_write(), zone management operations to null_zone_mgmt(), and
 * process all other operations (e.g. reads) under the target zone lock.
 */
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		/* Offline zones fail all remaining operations. */
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}
Shin'ichiro Kawasakid3a57382022-12-01 15:10:36 +0900697
/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested again to zones already in
	 * read-only condition, restore back normal empty condition. Do the same
	 * if the offline condition is requested for offline zones. Otherwise,
	 * set the specified zone condition to the zones. Finish the zones
	 * beforehand to free up zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		/* Read-only/offline zones carry an invalid write pointer. */
		zone->wp = (sector_t)-1;
	}

	null_unlock_zone(dev, zone);
}
732
/*
 * Identify a zone from the sector written to configfs file. Then set zone
 * condition to the zone.
 *
 * @page holds the user-supplied sector number; @cond is the target zone
 * condition (read-only or offline, validated by null_set_zone_cond()).
 * Returns @count on success or a negative error code.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	/* The zone array only exists once the device is powered on. */
	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Can not change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}