// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance from which to get the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);

/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);

/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);

/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 * NVMe fabrics space.)
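 *
 * The generic NVMe host core uses this path, e.g. via the controller's
 * reg_write32() op, to update properties such as the Controller
 * Configuration (CC) property when enabling or disabling a fabrics
 * controller.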
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0, false);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);

/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
 *				connect() errors.
 * @ctrl:	The specific /dev/nvmeX device that had the error.
 * @errval:	Error code to be decoded in a more human-friendly
 *		printout.
 * @offset:	For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
 * @cmd:	This is the SQE portion of a submission capsule.
 * @data:	This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & (~NVME_SC_DNR);

	switch (err_sctype) {

	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;

	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;

	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;

	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;

	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	} /* switch (err_sctype) */
}

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via an NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd;
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * Set the keep-alive timeout in milliseconds granularity
	 * (seconds * 1000) and add a grace period for controller
	 * kato enforcement.
	 */
	cmd.connect.kato = ctrl->kato ?
		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);

/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 * @poll:	Whether or not to poll for the completion of the connect cmd.
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via the NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
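 *
 * Transport drivers typically call this once per I/O queue, after
 * nvmf_connect_admin_queue() has succeeded and ctrl->cntlid is known,
 * both when first creating the association and again on reset or
 * reconnect.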
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
	struct nvme_command cmd;
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	if (ctrl->opts->disable_sqflow)
		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);

bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);

/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered.
 * However, if the controller is deleting or if anything is marked for
 * failfast or nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;

	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_start_request(rq);
	nvme_complete_rq(rq);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);

bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * If we are in some state of setup or teardown only allow
	 * internally generated commands.
	 */
	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
		return false;

	/*
	 * Only allow commands on a live queue, except for the connect command,
	 * which is required to set the queue live in the appropriate states.
	 */
	switch (ctrl->state) {
	case NVME_CTRL_CONNECTING:
		if (nvme_is_fabrics(req->cmd) &&
		    req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
			return true;
		break;
	default:
		break;
	case NVME_CTRL_DEAD:
		return false;
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvmf_check_ready);

static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_TOS,			"tos=%d"		},
	{ NVMF_OPT_ERR,			NULL			}
};

static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = NVME_DEFAULT_KATO;
	opts->duplicate_connect = false;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1; /* < 0 == use transport default */

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
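				/* match_strdup() could not duplicate the option value */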
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
						   num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			nvmf_host_put(opts->host);
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	}
	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						    opts->reconnect_delay);

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address is rough. In most cases, none is
	 * specified and the host port is selected by the stack.
	 *
	 * Assume no match if:
	 * - local address is specified and address is not the same
	 * - local address is not specified but remote is, or vice versa
	 *   (admin using specific host_traddr when it matters).
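	 *
	 * If neither side specified host_traddr, the local address is simply
	 * not considered when matching.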
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below. Then clear the generic flags so that transport
	 * drivers don't have to care about them.
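	 *
	 * The transport-specific required and allowed options are validated
	 * further down, once the matching nvmf_transport_ops has been found.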
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}

static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
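	 *
	 * single_open() will point it at the seq_file, whose ->private field
	 * later holds the controller created by a write (see nvmf_dev_write()).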
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}

MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);