// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u64 val = le64_to_cpu(req->cmd->prop_set.value);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_set.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_set_command, attrib);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
	case NVME_REG_CC:
		nvmet_update_cc(req->sq->ctrl, val);
		break;
	default:
		req->error_loc =
			offsetof(struct nvmf_property_set_command, offset);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	}

	if (status && req->cmd->prop_get.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, offset);
	} else {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, attrib);
	}

	req->cqe->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->execute = nvmet_execute_prop_get;
		break;
#ifdef CONFIG_NVME_TARGET_AUTH
	case nvme_fabrics_type_auth_send:
		req->execute = nvmet_execute_auth_send;
		break;
	case nvme_fabrics_type_auth_receive:
		req->execute = nvmet_execute_auth_receive;
		break;
#endif
	default:
		pr_debug("received unknown capsule type 0x%x\n",
			 cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}

u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
#ifdef CONFIG_NVME_TARGET_AUTH
	case nvme_fabrics_type_auth_send:
		req->execute = nvmet_execute_auth_send;
		break;
	case nvme_fabrics_type_auth_receive:
		req->execute = nvmet_execute_auth_receive;
		break;
#endif
	default:
		pr_debug("received unknown capsule type 0x%x\n",
			 cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}
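/*
 * Illustrative sketch, not part of the upstream file: per NVMe over
 * Fabrics, bit 0 of the Property Get/Set ATTRIB field selects the access
 * size -- 0 for a 4-byte property, 1 for an 8-byte property.  That is why
 * nvmet_execute_prop_get() serves the 64-bit CAP register only when the
 * bit is set and the 32-bit VS/CC/CSTS registers only when it is clear,
 * and why nvmet_execute_prop_set(), which supports only the 32-bit CC,
 * rejects the bit outright.  The helper name below is hypothetical.
 */
static size_t __maybe_unused nvmet_example_prop_access_size(u8 attrib)
{
	/* Bit 0 set: 64-bit property access; clear: 32-bit access. */
	return (attrib & 1) ? sizeof(u64) : sizeof(u32);
}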
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;
	u16 mqes = NVME_CAP_MQES(ctrl->cap);
	u16 ret;

	if (!sqsize) {
		pr_warn("queue size zero!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		goto err;
	}

	if (ctrl->sqs[qid] != NULL) {
		pr_warn("qid %u has already been created\n", qid);
		req->error_loc = offsetof(struct nvmf_connect_command, qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (sqsize > mqes) {
		pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
			sqsize, mqes, ctrl->cntlid);
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}

	/* note: convert queue size from 0's-based value to 1's-based value */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);

	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
		req->sq->sqhd_disabled = true;
		req->cqe->sq_head = cpu_to_le16(0xffff);
	}

	if (ctrl->ops->install_queue) {
		ret = ctrl->ops->install_queue(req->sq);
		if (ret) {
			pr_err("failed to install queue %d cntlid %d ret %x\n",
			       qid, ctrl->cntlid, ret);
			ctrl->sqs[qid] = NULL;
			goto err;
		}
	}

	return 0;

err:
	req->sq->ctrl = NULL;
	return ret;
}

static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
{
	return (u32)ctrl->cntlid |
		(nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
}
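/*
 * Illustrative sketch, not part of the upstream file: SQSIZE in the
 * Connect command and MQES in the CAP property are both 0's-based values,
 * i.e. a field value of N describes a queue of N + 1 entries.  That is
 * why nvmet_install_queue() compares the two directly and then passes
 * sqsize + 1 to nvmet_cq_setup()/nvmet_sq_setup() as the 1's-based queue
 * depth.  The helper name below is hypothetical.
 */
static bool __maybe_unused nvmet_example_sqsize_valid(u16 sqsize, u64 cap)
{
	/* Zero is rejected outright; otherwise compare 0's-based values. */
	return sqsize && sqsize <= NVME_CAP_MQES(cap);
}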
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;
	int ret;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			d->cntlid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl);
	if (status)
		goto out;

	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;

	uuid_copy(&ctrl->hostid, &d->hostid);

	ret = nvmet_setup_auth(ctrl);
	if (ret < 0) {
		pr_err("Failed to setup authentication, error %d\n", ret);
		nvmet_ctrl_put(ctrl);
		if (ret == -EPERM)
			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
		else
			status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
		nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
		ctrl->pi_support ? " T10-PI is enabled" : "",
		nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				   le16_to_cpu(d->cntlid), req);
	if (!ctrl) {
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		goto out;
	}

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status)
		goto out_ctrl_put;

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}

u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (!nvme_is_fabrics(cmd)) {
		pr_debug("invalid command 0x%x on unconnected queue.\n",
			 cmd->fabrics.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
			 cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}