// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nvme.h>
#include <linux/blkdev.h>
#include "nvmet.h"

/*
 * We set the Memory Page Size Minimum (MPSMIN) for the target controller to
 * 0, to which nvme_enable_ctrl() adds 12, resulting in a page_shift value of
 * 12 (i.e. 2^12 = 4k pages). Use a shift of 12 when calculating the ZASL.
 */
#define NVMET_MPSMIN_SHIFT	12

static inline u8 nvmet_zasl(unsigned int zone_append_sects)
{
	/*
	 * Zone Append Size Limit (zasl) is expressed as a power of 2 value
	 * with the minimum memory page size (i.e. 12) as unit.
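	 *
	 * zone_append_sects is given in 512-byte sectors, so the right shift
	 * by (NVMET_MPSMIN_SHIFT - 9) converts it to 4k (MPSMIN) units. For
	 * example, a queue limit of 256 sectors (128k) yields
	 * ilog2(256 >> 3) = 5, i.e. a Zone Append limit of 2^5 * 4k = 128k.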
	 */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
}

static int validate_conv_zones_cb(struct blk_zone *z,
				  unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
{
	struct request_queue *q = ns->bdev->bd_disk->queue;
	u8 zasl = nvmet_zasl(queue_max_zone_append_sectors(q));
	struct gendisk *bd_disk = ns->bdev->bd_disk;
	int ret;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS. Exclude zoned drives that have such a smaller
	 * last zone.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;
	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device. Otherwise, use report zones to detect if
	 * the device has conventional zones.
	 */
	if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
				  validate_conv_zones_cb, NULL);
	if (ret < 0)
		return false;

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	return true;
}

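/*
 * The ZASL reported to the host is the smaller of the subsystem-wide ZASL
 * computed in nvmet_bdev_zns_enable() and the transport MDTS (when the
 * transport provides ->get_mdts), so a Zone Append payload never exceeds
 * what the transport can carry.
 */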
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
{
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl_zns *id;
	u16 status;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
{
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id_zns = kzalloc(sizeof(*id_zns), GFP_KERNEL);
	if (!id_zns) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_req_find_ns(req);
	if (status)
		goto done;

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto done;
	}

	nvmet_ns_revalidate(req->ns);
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));

done:
	/*
	 * Only copy the (possibly zeroed) structure when no error status is
	 * pending, so a successful SGL copy does not mask an earlier error.
	 */
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
	kfree(id_zns);
out:
	nvmet_req_complete(req, status);
}

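/*
 * NUMD in Zone Management Receive is a 0's based count of dwords, so the
 * host buffer size in bytes is (numd + 1) * 4.
 */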
static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	}

	if (out_bufsize < sizeof(struct nvme_zone_report)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.pr) {
	case 0:
	case 1:
		break;
	default:
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	switch (req->cmd->zmr.zrasf) {
	case NVME_ZRASF_ZONE_REPORT_ALL:
	case NVME_ZRASF_ZONE_STATE_EMPTY:
	case NVME_ZRASF_ZONE_STATE_IMP_OPEN:
	case NVME_ZRASF_ZONE_STATE_EXP_OPEN:
	case NVME_ZRASF_ZONE_STATE_CLOSED:
	case NVME_ZRASF_ZONE_STATE_FULL:
	case NVME_ZRASF_ZONE_STATE_READONLY:
	case NVME_ZRASF_ZONE_STATE_OFFLINE:
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

struct nvmet_report_zone_data {
	struct nvmet_req *req;
	u64 out_buf_offset;
	u64 out_nr_zones;
	u64 nr_zones;
	u8 zrasf;
};

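/*
 * Translate one block layer zone descriptor into an NVMe ZNS zone
 * descriptor: the block layer zone condition is shifted into the upper
 * nibble of the Zone State (ZS) field, and the "reset write pointer
 * recommended" flag maps to the Reset Zone Recommended (RZR) bit of the
 * Zone Attributes (ZA) field.
 */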
static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	static const unsigned int nvme_zrasf_to_blk_zcond[] = {
		[NVME_ZRASF_ZONE_STATE_EMPTY]	 = BLK_ZONE_COND_EMPTY,
		[NVME_ZRASF_ZONE_STATE_IMP_OPEN] = BLK_ZONE_COND_IMP_OPEN,
		[NVME_ZRASF_ZONE_STATE_EXP_OPEN] = BLK_ZONE_COND_EXP_OPEN,
		[NVME_ZRASF_ZONE_STATE_CLOSED]	 = BLK_ZONE_COND_CLOSED,
		[NVME_ZRASF_ZONE_STATE_READONLY] = BLK_ZONE_COND_READONLY,
		[NVME_ZRASF_ZONE_STATE_FULL]	 = BLK_ZONE_COND_FULL,
		[NVME_ZRASF_ZONE_STATE_OFFLINE]	 = BLK_ZONE_COND_OFFLINE,
	};
	struct nvmet_report_zone_data *rz = d;

	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		struct nvme_zone_descriptor zdesc = { };
		u16 status;

		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;

	return 0;
}

static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
		(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
}

static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
{
	if (bufsize <= sizeof(struct nvme_zone_report))
		return 0;

	return (bufsize - sizeof(struct nvme_zone_report)) /
		sizeof(struct nvme_zone_descriptor);
}

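/*
 * A Zone Management Receive report is laid out as a struct nvme_zone_report
 * header (which carries the zone count) followed by zone descriptors, so the
 * descriptor output offset starts right past the header and the final zone
 * count is copied back to offset 0 once the device has been scanned.
 */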
static void nvmet_bdev_zone_zmgmt_recv_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
	__le64 nr_zones;
	u16 status;
	int ret;
	struct nvmet_report_zone_data rz_data = {
		.out_nr_zones = get_nr_zones_from_buf(req, out_bufsize),
		/* leave room for the report zone header */
		.out_buf_offset = sizeof(struct nvme_zone_report),
		.zrasf = req->cmd->zmr.zrasf,
		.nr_zones = 0,
		.req = req,
	};

	status = nvmet_bdev_validate_zone_mgmt_recv(req);
	if (status)
		goto out;

	if (!req_slba_nr_zones) {
		status = NVME_SC_SUCCESS;
		goto out;
	}

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,
				  nvmet_bdev_report_zone_cb, &rz_data);
	if (ret < 0) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/*
	 * When the partial bit is set, nr_zones must indicate the number of
	 * zone descriptors actually transferred.
	 */
	if (req->cmd->zmr.pr)
		rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones);

	nr_zones = cpu_to_le64(rz_data.nr_zones);
	status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones));

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

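/*
 * Map a Zone Send Action to the corresponding block layer zone management
 * operation. REQ_OP_LAST is used as a sentinel for actions that have no
 * block layer equivalent; callers treat it as an invalid field.
 */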
static inline enum req_opf zsa_req_op(u8 zsa)
{
	switch (zsa) {
	case NVME_ZONE_OPEN:
		return REQ_OP_ZONE_OPEN;
	case NVME_ZONE_CLOSE:
		return REQ_OP_ZONE_CLOSE;
	case NVME_ZONE_FINISH:
		return REQ_OP_ZONE_FINISH;
	case NVME_ZONE_RESET:
		return REQ_OP_ZONE_RESET;
	default:
		return REQ_OP_LAST;
	}
}

static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
{
	switch (ret) {
	case 0:
		return NVME_SC_SUCCESS;
	case -EINVAL:
	case -EIO:
		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
	default:
		return NVME_SC_INTERNAL;
	}
}

struct nvmet_zone_mgmt_send_all_data {
	unsigned long *zbitmap;
	struct nvmet_req *req;
};

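/*
 * Report zones callback used when emulating a "select all" zone management
 * send: mark a zone in the bitmap only if the requested action would
 * actually transition it, i.e. open acts on closed zones, close on
 * implicitly or explicitly open zones, and finish on open or closed zones.
 */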
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	struct nvmet_zone_mgmt_send_all_data *data = d;

	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_OPEN:
		switch (z->cond) {
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_CLOSE:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
			break;
		default:
			return 0;
		}
		break;
	case REQ_OP_ZONE_FINISH:
		switch (z->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
		case BLK_ZONE_COND_EXP_OPEN:
		case BLK_ZONE_COND_CLOSED:
			break;
		default:
			return 0;
		}
		break;
	default:
		return -EINVAL;
	}

	set_bit(i, data->zbitmap);

	return 0;
}

static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;
	struct nvmet_zone_mgmt_send_all_data d = {
		.req = req,
	};

	d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
				 GFP_NOIO, q->node);
	if (!d.zbitmap) {
		ret = -ENOMEM;
		goto out;
	}

	/* Scan and build bitmap of the eligible zones */
	ret = blkdev_report_zones(bdev, 0, nr_zones, zmgmt_send_scan_cb, &d);
	if (ret != nr_zones) {
		if (ret > 0)
			ret = -EIO;
		goto out;
	} else {
		/* We scanned all the zones */
		ret = 0;
	}

	while (sector < get_capacity(bdev->bd_disk)) {
		if (test_bit(blk_queue_zone_no(q, sector), d.zbitmap)) {
			bio = blk_next_bio(bio, 0, GFP_KERNEL);
			bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
			bio->bi_iter.bi_sector = sector;
			bio_set_dev(bio, bdev);
			/* This may take a while, so be nice to others */
			cond_resched();
		}
		sector += blk_queue_zone_sectors(q);
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out:
	kfree(d.zbitmap);

	return blkdev_zone_mgmt_errno_to_nvme_status(ret);
}

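/*
 * "Select all" handling: resetting all zones maps directly onto a single
 * blkdev_zone_mgmt() call spanning the whole capacity, while open, close and
 * finish have no all-zone equivalent in the block layer and are emulated one
 * zone at a time via nvmet_bdev_zone_mgmt_emulate_all().
 */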
static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
{
	int ret;

	switch (zsa_req_op(req->cmd->zms.zsa)) {
	case REQ_OP_ZONE_RESET:
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);
		if (ret < 0)
			return blkdev_zone_mgmt_errno_to_nvme_status(ret);
		break;
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return nvmet_bdev_zone_mgmt_emulate_all(req);
	default:
		/* this is needed to quiet compiler warning */
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	return NVME_SC_SUCCESS;
}

static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	u16 status = NVME_SC_SUCCESS;
	int ret;

	if (op == REQ_OP_LAST) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
		goto out;
	}

	/* when the select all bit is set, the slba field is ignored */
	if (req->cmd->zms.select_all) {
		status = nvmet_bdev_execute_zmgmt_send_all(req);
		goto out;
	}

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
	if (ret < 0)
		status = blkdev_zone_mgmt_errno_to_nvme_status(ret);

out:
	nvmet_req_complete(req, status);
}

void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}

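/*
 * Zone Append completion: for REQ_OP_ZONE_APPEND the block layer returns the
 * sector the data was actually written to in bio->bi_iter.bi_sector. Convert
 * it back to an LBA and place it in the 64-bit result field of the completion
 * queue entry, where the host expects the assigned write pointer.
 */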
static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
{
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
	nvmet_req_bio_put(req, bio);
}

void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
{
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
	u16 status = NVME_SC_SUCCESS;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	struct bio *bio;
	int sg_cnt;

	/* Request is completed on len mismatch in nvmet_check_transfer_len() */
	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
		goto out;
	}

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	if (nvmet_use_inline_bvec(req)) {
		bio = &req->z.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
	}

	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		struct page *p = sg_page(sg);
		unsigned int l = sg->length;
		unsigned int o = sg->offset;
		unsigned int ret;

		ret = bio_add_zone_append_page(bio, p, l, o);
		if (ret != sg->length) {
			status = NVME_SC_INTERNAL;
			goto out_put_bio;
		}
		total_len += sg->length;
	}

	if (total_len != nvmet_rw_data_len(req)) {
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
		goto out_put_bio;
	}

	submit_bio(bio);
	return;

out_put_bio:
	nvmet_req_bio_put(req, bio);
out:
	nvmet_req_complete(req, status);
}

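/*
 * Dispatch the ZNS specific I/O commands; anything else (e.g. regular reads
 * and writes on the zoned namespace) falls back to the generic block device
 * I/O command parser.
 */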
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		return nvmet_bdev_parse_io_cmd(req);
	}
}