// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u64 val = le64_to_cpu(req->cmd->prop_set.value);
	u16 status = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	if (req->cmd->prop_set.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_set_command, attrib);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
	case NVME_REG_CC:
		nvmet_update_cc(req->sq->ctrl, val);
		break;
	default:
		req->error_loc =
			offsetof(struct nvmf_property_set_command, offset);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	}

	if (status && req->cmd->prop_get.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, offset);
	} else {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, attrib);
	}

	req->cqe->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->execute = nvmet_execute_prop_get;
		break;
	default:
		pr_err("received unknown capsule type 0x%x\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}

static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;

	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}
	if (!sqsize) {
		pr_warn("queue size zero!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	/* note: convert queue size from 0's-based value to 1's-based value */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);

	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
		req->sq->sqhd_disabled = true;
		req->cqe->sq_head = cpu_to_le16(0xffff);
	}

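	/*
	 * Optional per-transport hook: let the transport finish setting up
	 * the queue it just accepted; a non-zero return rejects the connect.
	 */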
	if (ctrl->ops->install_queue) {
		u16 ret = ctrl->ops->install_queue(req->sq);

		if (ret) {
			pr_err("failed to install queue %d cntlid %d ret %x\n",
				qid, ctrl->cntlid, ret);
			return ret;
		}
	}

	return 0;
}

static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;

	if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			le16_to_cpu(d->cntlid));
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl);
	if (status) {
		if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
			req->error_loc =
				offsetof(struct nvme_common_command, opcode);
		goto out;
	}

	uuid_copy(&ctrl->hostid, &d->hostid);

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

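/*
 * Connect on an I/O queue: look up an existing live controller by
 * subsystem NQN, host NQN and cntlid, validate the requested queue id
 * against the subsystem limit, and bind the new queue to that controller.
 */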
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	if (!nvmet_check_data_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				     le16_to_cpu(d->cntlid),
				     req, &ctrl);
	if (status)
		goto out;

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		/* pass back cntlid that had the issue of installing queue */
		req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
		goto out_ctrl_put;
	}

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}

u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (!nvme_is_fabrics(cmd)) {
		pr_err("invalid command 0x%x on unconnected queue.\n",
			cmd->fabrics.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}