// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance whose address/port is being
 *		reported.
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += snprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += snprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	len += snprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
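/*
 * Illustrative nvmf_get_address() output, assuming TRADDR and TRSVCID were
 * both supplied at connect time (address and port values made up):
 *
 *	traddr=192.168.1.100,trsvcid=4420
 */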
/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);

/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
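/*
 * Illustrative use of the property readers (a sketch, not a quote of the
 * core code): the controller-enable path reads the CAP property and polls
 * the CSTS property through these helpers:
 *
 *	u64 cap;
 *	u32 csts;
 *
 *	ret = nvmf_reg_read64(ctrl, NVME_REG_CAP, &cap);
 *	ret = nvmf_reg_read32(ctrl, NVME_REG_CSTS, &csts);
 */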
/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
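/*
 * Illustrative use (sketch): enabling or disabling a fabrics controller
 * comes down to a property write of the CC register:
 *
 *	ret = nvmf_reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
 */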
/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print
 *			      out function for connect() errors.
 *
 * @ctrl: the specific /dev/nvmeX device that had the error.
 *
 * @errval: Error code to be decoded in a more human-friendly
 *	    printout.
 *
 * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
 *
 * @cmd: This is the SQE portion of a submission capsule.
 *
 * @data: This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & (~NVME_SC_DNR);

	switch (err_sctype) {
	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;

	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;

	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;

	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d\n",
			cmd->connect.recfmt);
		break;

	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	} /* switch (err_sctype) */
}

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via an NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 *
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd;
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * The Connect command's kato field is in milliseconds, so convert
	 * the keep-alive timeout from seconds (* 1000) and add a grace
	 * period for controller kato enforcement.
	 */
	cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);	/* dynamic controller */
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
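/*
 * Sketch of the typical transport call order around
 * nvmf_connect_admin_queue() (details vary per transport):
 *
 *	1. allocate and start the admin queue and its tag set
 *	2. nvmf_connect_admin_queue(ctrl)
 *	3. enable and identify the controller via the nvme core helpers
 */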
/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 * @poll:	Whether or not to poll for the completion of the connect cmd.
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
	struct nvme_command cmd;
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);

bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);

/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
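/*
 * Sketch of a transport driver registering itself; "foo" and its ops are
 * hypothetical, loosely modeled on the in-tree transports:
 *
 *	static struct nvmf_transport_ops nvme_foo_transport = {
 *		.name		= "foo",
 *		.module		= THIS_MODULE,
 *		.required_opts	= NVMF_OPT_TRADDR,
 *		.allowed_opts	= NVMF_OPT_TRSVCID,
 *		.create_ctrl	= nvme_foo_create_ctrl,
 *	};
 *
 *	static int __init nvme_foo_init_module(void)
 *	{
 *		return nvmf_register_transport(&nvme_foo_transport);
 *	}
 */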
static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

/*
 * For a command that we are not in a state to send to the device, the
 * default action is to busy it and retry it after the controller state
 * has recovered.  However, if the controller is deleting, or if anything
 * is marked for failfast or nvme multipath, the command is failed
 * immediately.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;

	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_start_request(rq);
	nvme_complete_rq(rq);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
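/*
 * Typical ->queue_rq() use of nvmf_fail_nonready_command() above together
 * with __nvmf_check_ready() below, via the nvmf_check_ready() wrapper from
 * fabrics.h (a sketch only; the "queue" naming follows the in-tree
 * transports):
 *
 *	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
 *		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 */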
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * If we are in some state of setup or teardown only allow
	 * internally generated commands.
	 */
	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
		return false;

	/*
	 * Only allow commands on a live queue, except for the connect
	 * command, which is required to bring the queue live in the
	 * appropriate states.
	 */
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		if (nvme_is_fabrics(req->cmd) &&
		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
			return true;
		break;
	case NVME_CTRL_DEAD:
		return false;
	default:
		break;
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvmf_check_ready);
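/*
 * Illustrative example of the comma-separated option string parsed below,
 * as written to /dev/nvme-fabrics (typically by nvme-cli; the exact set of
 * required options depends on the transport, and all values here are made
 * up):
 *
 *	transport=rdma,traddr=192.168.1.100,trsvcid=4420,nqn=nqn.2014-08.com.example:subsys1
 */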
static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_ERR,			NULL			}
};

static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = NVME_DEFAULT_KATO;
	opts->duplicate_connect = false;
	opts->hdr_digest = false;
	opts->data_digest = false;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;

			if (opts->discovery_nqn && opts->kato) {
				pr_err("Discovery controllers cannot accept KATO != 0\n");
				ret = -EINVAL;
				goto out;
			}

			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			nvmf_host_put(opts->host);
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->kato = 0;
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	}
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}
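/*
 * Worked example for the max_reconnects computation above, assuming the
 * NVMF_DEF_* defaults from fabrics.h (ctrl_loss_tmo = 600s,
 * reconnect_delay = 10s): DIV_ROUND_UP(600, 10) = 60 reconnect attempts
 * before the controller is given up on.  A negative ctrl_loss_tmo means
 * "reconnect forever" (max_reconnects = -1).
 */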
static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address is rough. In most cases, none is
	 * specified and the host port is selected by the stack.
	 *
	 * Assume no match if:
	 * - local address is specified and address is not the same
	 * - local address is not specified but remote is, or vice versa
	 *   (admin using specific host_traddr when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below. Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}
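/*
 * Illustrative controller creation from userspace (this is what nvme-cli
 * does under the hood; the loop transport and NQN are just examples):
 *
 *	echo "transport=loop,nqn=testnqn" > /dev/nvme-fabrics
 */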
static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}
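/*
 * After a successful write, reading the same file descriptor back reports
 * the newly created controller, e.g. (values illustrative):
 *
 *	instance=0,cntlid=1
 */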
static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}

MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);