xref: /openbmc/linux/drivers/nvme/target/zns.c (revision c9933d49)
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) for the target controller to 0,
 * to which nvme_enable_ctrl() adds 12, resulting in a page_shift of 12,
 * i.e. a 2^12 = 4K page size. Use a shift of 12 when calculating the ZASL.
 */
#define NVMET_MPSMIN_SHIFT	12

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * in units of the minimum memory page size (i.e. a shift of 12).
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}
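
/*
 * Worked example (illustrative values, not taken from the spec): a device
 * whose queue_max_zone_append_sectors() is 256 (128K in 512-byte sectors)
 * yields 256 >> (12 - 9) = 32 minimum-size pages, and ilog2(32) = 5, so
 * ZASL = 5, i.e. a maximum zone append payload of 2^5 * 4K = 128K.
 */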

static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	struct request_queue *q = ns->bdev->bd_disk->queue;
	u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such a smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

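/*
 * Build the Zoned Namespace command set specific Identify Controller data
 * structure. The reported ZASL is the subsystem-wide value, clamped to the
 * transport's MDTS when the transport provides a get_mdts() callback.
 */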
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

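/*
 * Build the Zoned Namespace command set specific Identify Namespace data
 * structure: zone size (ZSZE) in logical blocks for LBA format 0, plus the
 * maximum open (MOR) and maximum active (MAR) resources taken from the
 * underlying zoned block device.
 */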
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

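/*
 * Validate the Zone Management Receive command before running the report:
 * the starting LBA must be within the device capacity, the output buffer
 * must be large enough for at least the report header, and the Zone Receive
 * Action, Partial Report bit and reporting options (zrasf) must be values
 * this target supports.
 */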
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

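/*
 * Per-zone callback for blkdev_report_zones(): skip zones whose condition
 * does not match the requested reporting option, translate matching zones
 * into NVMe zone descriptors and copy them to the host buffer while space
 * remains, and count every matching zone so the report header reflects the
 * full number of matches.
 */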
static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

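/*
 * Number of zones from the zone containing the requested starting LBA to the
 * end of the device.
 */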
static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

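/*
 * Zone Management Receive handler, run from the zoned block device
 * workqueue: validate the command, report the zones starting at the
 * requested LBA through nvmet_bdev_report_zone_cb(), then write the zone
 * count into the report header at offset 0 of the host buffer.
 */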
static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave room for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				 nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

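/*
 * Map an NVMe Zone Send Action to the corresponding block layer zone
 * management operation. REQ_OP_LAST is returned for actions this target
 * does not support.
 */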
static inline enum req_opf zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

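/*
 * Report zones callback used when emulating a "select all" open, close or
 * finish: mark in the bitmap only those zones whose current condition makes
 * the requested transition meaningful (e.g. only closed zones for an
 * open-all).
 */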
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

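/*
 * Emulate a "select all" zone open, close or finish: build a bitmap of the
 * zones eligible for the requested transition with zmgmt_send_scan_cb(),
 * then chain one zone management bio per eligible zone and wait for the
 * chain to complete.
 */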
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, q->node);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

	while (sector < get_capacity(bdev->bd_disk)) {
		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, bdev, 0,
				zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
				GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += blk_queue_zone_sectors(q);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

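/*
 * Dispatch a Zone Management Send with the Select All bit set: a reset is
 * passed straight to blkdev_zone_mgmt() over the whole device, while open,
 * close and finish go through the per-zone emulation above.
 */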
static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet a compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

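/*
 * Zone Management Send handler, run from the zoned block device workqueue:
 * reject unsupported zone send actions, handle the Select All case, validate
 * that the starting LBA is in range and zone aligned, and pass single-zone
 * operations to blkdev_zone_mgmt().
 */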
static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when the select all bit is set, the slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

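/*
 * Zone append bio completion: on success, return the LBA at which the data
 * was written (the write pointer position chosen by the block layer) in the
 * CQE result, as required for Zone Append.
 */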
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

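/*
 * Execute a Zone Append command against the backing zoned block device:
 * validate that the starting LBA is in range and zone aligned, build a
 * REQ_OP_ZONE_APPEND bio from the request scatterlist with
 * bio_add_zone_append_page(), and submit it. Completion, including returning
 * the written LBA, is handled in nvmet_bdev_zone_append_bio_done().
 */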
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), op);
	} else {
		bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
	}

	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}
617