// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

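/* Convert a size given in MiB to a number of 512B sectors. */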
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

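/* Return the index of the zone containing the sector @sect. */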
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

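/*
 * Zone resource accounting (number of implicitly open, explicitly open and
 * closed zones) is protected by dev->zone_res_lock. The lock is needed only
 * when the open and/or active zone limits are enforced, that is, when
 * dev->need_zone_res_mgmt is true.
 */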
static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}

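/*
 * Per-zone lock protecting the zone condition and write pointer: a mutex is
 * used for memory backed devices since command processing may sleep in that
 * case; otherwise, a spinlock is enough.
 */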
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

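/*
 * Initialize the zone configuration of a zoned null_blk device: validate the
 * zone size and zone capacity parameters, allocate and initialize the zone
 * array, and set up the maximum open and active zone limits.
 */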
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	/*
	 * If a smaller zone capacity was requested, do not allow a smaller last
	 * zone at the same time as such zone configuration does not correspond
	 * to any real zoned device.
	 */
	if (dev->zone_capacity != dev->zone_size &&
	    dev->size & (dev->zone_size - 1)) {
		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i <  dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}

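/*
 * Set up the zone related queue limits and register the device as a
 * host-managed zoned block device. For blk-mq devices, also revalidate the
 * disk zones to update the block layer zone information.
 */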
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	disk_set_zoned(nullb->disk, BLK_ZONED_HM);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	blk_queue_chunk_sectors(q, dev->zone_size_sects);
	nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
	disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

	if (queue_is_mq(q))
		return blk_revalidate_disk_zones(nullb->disk, NULL);

	return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

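/*
 * Report zones: execute the report callback @cb for at most @nr_zones zones,
 * starting from the zone containing @sector.
 */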
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

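/*
 * Transition a zone to the closed condition, or back to empty if its write
 * pointer is at the zone start, updating the zone resource counters.
 * Called with the zone resource lock held when resource management is
 * enabled.
 */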
static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}

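/*
 * Close an implicitly open zone to free up a zone resource, scanning the
 * sequential zones in a round-robin manner starting after the last zone
 * that was implicitly closed.
 */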
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

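/*
 * Check that activating one more zone (that is, transitioning a zone to the
 * implicitly open, explicitly open or closed condition) does not exceed the
 * maximum number of active zones, if that limit is set.
 */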
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
			dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

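/*
 * Check that opening one more zone does not exceed the maximum number of
 * open zones, if that limit is set, implicitly closing an open zone when
 * needed and possible.
 */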
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

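/*
 * Process a regular write or a zone append command: check the write pointer
 * position, handle the zone condition transitions and zone resources,
 * execute the write and advance the zone write pointer.
 */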
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		/* Cannot write to the zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (dev->queue_mode == NULL_Q_MQ)
			cmd->rq->__sector = sector;
		else
			cmd->bio->bi_iter.bi_sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

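/* Explicitly open a zone (REQ_OP_ZONE_OPEN). */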
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

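/* Close a zone (REQ_OP_ZONE_CLOSE). */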
static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}

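/* Transition a zone to the full condition (REQ_OP_ZONE_FINISH). */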
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}

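/*
 * Reset a zone write pointer (REQ_OP_ZONE_RESET), discarding the zone data
 * for memory backed devices.
 */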
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

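/*
 * Execute a zone management operation, either for a single zone or, in the
 * case of REQ_OP_ZONE_RESET_ALL, for all sequential zones.
 */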
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

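/*
 * Main entry point for processing a command targeting a zoned null_blk
 * device: writes and zone management operations are handled by the zone
 * code while other commands (e.g. reads) are processed with the target
 * zone locked.
 */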
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the normal empty condition. Do the same if the
	 * offline condition is requested for a zone that is already offline.
	 * Otherwise, set the specified zone condition, finishing the zone
	 * beforehand to free up its zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = (sector_t)-1;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector written to the configfs file and
 * set that zone's condition.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Can not change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}