// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) of the target controller to 0,
 * to which nvme_enable_ctrl() adds 12, resulting in a page_shift value of 12
 * (i.e. 2^12 = 4K pages). Use this shift of 12 when calculating the ZASL.
 */
#define NVMET_MPSMIN_SHIFT	12

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * with the minimum memory page size (i.e. 12) as unit.
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}

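/* Report zones callback: fail if the device contains any conventional zone. */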
static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

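/*
 * Enable ZNS support for a namespace backed by a zoned block device: check
 * that the device zone configuration is usable as a ZNS namespace (uniform
 * zone size, no conventional zones) and that its zone append limit is
 * compatible with the subsystem ZASL, then set up the namespace block size.
 */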
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	u8 zasl = nvmet_zasl(bdev_max_zone_append_sectors(ns->bdev));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;
	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such a smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, bdev_nr_zones(ns->bdev),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

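/*
 * Handle the I/O Command Set specific Identify Controller data structure for
 * the Zoned Namespace command set: report the Zone Append Size Limit, capped
 * by the transport MDTS when the transport provides one.
 */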
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

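/*
 * Handle the I/O Command Set specific Identify Namespace data structure for
 * the Zoned Namespace command set: report the zone size in logical blocks and
 * the maximum open/active resources. MOR and MAR are 0's based values, and a
 * block layer limit of 0 (no limit) is reported as 0xffffffff.
 */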
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;
	u32 mar, mor;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);

	mor = bdev_max_open_zones(req->ns->bdev);
	if (!mor)
		mor = U32_MAX;
	else
		mor--;
	id_zns->mor = cpu_to_le32(mor);

	mar = bdev_max_active_zones(req->ns->bdev);
	if (!mar)
		mar = U32_MAX;
	else
		mar--;
	id_zns->mar = cpu_to_le32(mar);

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

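/*
 * Validate the Zone Management Receive command fields: the starting LBA must
 * be in range, the output buffer must hold at least the report header, and
 * only the Report Zones action with a known reporting option is supported.
 */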
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

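/*
 * Per-request context for building a zone report: out_buf_offset tracks the
 * position in the host buffer, out_nr_zones is the number of descriptors that
 * fit in that buffer, nr_zones counts the matching zones found, and zrasf is
 * the requested reporting option.
 */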
struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

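/*
 * Report zones callback: translate a block layer zone descriptor into an NVMe
 * zone descriptor and copy it to the host buffer. Zones that do not match the
 * requested reporting option are skipped; matching zones beyond the buffer
 * capacity are counted but not copied.
 */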
static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

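/* Number of zones from the requested starting LBA to the end of the device. */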
static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return bdev_nr_zones(req->ns->bdev) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

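/* Number of zone descriptors that fit in the buffer after the report header. */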
static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

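/*
 * Zone Management Receive work: validate the command, report zones starting
 * at the requested LBA, then fill in the report header with the resulting
 * number of zone descriptors.
 */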
static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave space for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				 nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

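/*
 * Map an NVMe Zone Send Action to the corresponding block layer zone
 * operation, or REQ_OP_LAST for unknown actions.
 */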
static inline enum req_op zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

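/* Map an errno from a block layer zone management operation to an NVMe status. */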
static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

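/* Context for a "select all" zone management send: one bit per eligible zone. */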
struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

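/*
 * Report zones callback: mark in the bitmap the zones whose condition makes
 * them eligible for the requested open, close or finish operation.
 */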
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

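/*
 * Emulate a "select all" open, close or finish operation, which the block
 * layer does not support natively: scan the device to build a bitmap of the
 * eligible zones, then chain one zone management bio per eligible zone and
 * wait for the whole chain to complete.
 */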
static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = bdev_nr_zones(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, bdev->bd_disk->node_id);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

	while (sector < bdev_nr_sectors(bdev)) {
		if (test_bit(disk_zone_no(bdev->bd_disk, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, bdev, 0,
				zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC,
				GFP_KERNEL);
			bio->bi_iter.bi_sector = sector;
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += bdev_zone_sectors(bdev);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

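/*
 * Zone Management Send with the select all bit set: reset all zones in a
 * single block layer call, or fall back to the emulated all-zone handling
 * for open, close and finish.
 */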
static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

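/*
 * Zone Management Send work: validate the zone send action and the starting
 * LBA, then perform the operation on the single zone starting at that LBA,
 * or on all zones when the select all bit is set.
 */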
static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_op op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when the select all bit is set, the slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

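/*
 * Zone Append completion: on success, return the LBA at which the data was
 * written in the command result.
 */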
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

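/*
 * Execute a Zone Append command: validate the transfer length and starting
 * LBA, build a REQ_OP_ZONE_APPEND bio from the request scatterlist and
 * submit it; the written LBA is returned on bio completion.
 */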
void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->ns->bdev, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), opf);
	} else {
		bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);
	}

	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

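/*
 * Parse an I/O command for a zoned namespace: dispatch the ZNS specific
 * opcodes and fall back to the generic block device parsing for the rest.
 */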
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}