// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	u32 max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to the pages equal to the number of segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export the aerl limit for the fabrics controller, update this
	 * when passthru based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas as most of the PCIe ctrls don't have support for kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fuse commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When the passthru controller is set up using the nvme-loop
	 * transport it will export the passthru ctrl subsysnqn (PCIe NVMe
	 * ctrl) and will fail in nvme/host/core.c in the
	 * nvme_init_subsystem()->nvme_active_ctrl() code path with a
	 * duplicate ctrl subsysnqn. In order to prevent that we mask the
	 * passthru-ctrl subsysnqn with the target ctrl subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

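/*
 * Rewrite the Identify Namespace data returned by the passthru controller:
 * clear every LBA format that carries metadata and report metadata as
 * unsupported, since the fabrics target cannot transfer it yet.
 */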
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe-oF target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once the target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	u16 status;

	nvme_execute_passthru_rq(rq);

	status = nvme_req(rq)->status;
	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	}

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_put_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_put_request(rq);
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	int sg_cnt = req->sg_cnt;
	struct scatterlist *sg;
	int op_flags = 0;
	struct bio *bio;
	int i, ret;

	if (req->cmd->common.opcode == nvme_cmd_flush)
		op_flags = REQ_FUA;
	else if (nvme_is_write(req->cmd))
		op_flags = REQ_SYNC | REQ_IDLE;

	bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	bio->bi_end_io = bio_put;
	bio->bi_opf = req_op(rq) | op_flags;

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			bio_put(bio);
			return -EINVAL;
		}
		sg_cnt--;
	}

	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		bio_put(bio);
		return ret;
	}

	return 0;
}

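/*
 * Issue the passthru command to the underlying controller: resolve the
 * namespace for I/O queue commands, allocate a struct request carrying the
 * raw NVMe command, map the target's scatterlist onto it and execute it
 * either asynchronously or, when command effects need handling, from a
 * work item.
 */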
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	u32 effects;
	u16 status;
	int ret;

	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto fail_out;
		}

		q = ns->queue;
	}

	rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		rq = NULL;
		status = NVME_SC_INTERNAL;
		goto fail_out;
	}

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto fail_out;
		}
	}

	/*
	 * If there are effects for the command we are about to execute, or
	 * an end_req function, we need to call nvme_execute_passthru_rq()
	 * synchronously in a work item, since the end_req function and
	 * nvme_passthru_end() can't be called in the request done callback,
	 * which is typically in interrupt context.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		schedule_work(&req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
				      nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

fail_out:
	if (ns)
		nvme_put_ns(ns);
	nvmet_req_complete(req, status);
	blk_put_request(rq);
}

/*
 * We need to emulate set host behaviour to ensure that the behaviour
 * requested by the target's host matches the behaviour requested of the
 * device's host, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

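/*
 * Default setup for a command that is passed through unmodified: execute
 * via nvmet_passthru_execute_cmd() without forcing the workqueue path.
 */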
static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list
 * for both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

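/*
 * Admin commands are handled one of three ways: emulated entirely by the
 * fabrics target (async events, keep alive and a few features handled
 * locally), passed through subject to the feature allowlist (get/set
 * features), or passed through directly (vendor specific commands,
 * identify, get log page). Everything else is rejected.
 */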
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru mode. Change this
		 * code once PCIe ctrls with keep alive support become
		 * available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	ctrl = nvme_ctrl_get_by_path(subsys->passthru_ctrl_path);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);

		goto out_unlock;
	}

	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_ctrl;
	}

	if (old)
		goto out_put_ctrl;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}

	mutex_unlock(&subsys->lock);
	return 0;

out_put_ctrl:
	nvme_put_ctrl(ctrl);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

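/*
 * Called when the subsystem itself is freed: drop the passthru controller
 * reference (if still held) and release the stored device path.
 */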
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);

	kfree(subsys->passthru_ctrl_path);
}
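
/*
 * Rough usage sketch (assumption: the nvmet configfs interface that
 * accompanies this passthru support; verify the attribute names against
 * your kernel before relying on them):
 *
 *   cd /sys/kernel/config/nvmet/subsystems
 *   mkdir testnqn
 *   echo -n /dev/nvme0 > testnqn/passthru/device_path
 *   echo 1 > testnqn/passthru/enable
 *
 * The subsystem can then be linked under a port like any other nvmet
 * subsystem, and the underlying PCIe controller is exposed to fabrics
 * hosts through this passthru code.
 */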