// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <crypto/hash.h>
#include <crypto/kpp.h>

#include "nvmet.h"

static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};

static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};

static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
	if (p->enabled)
		pr_err("Disable port '%u' before changing attribute in %s\n",
		       le16_to_cpu(p->disc_addr.portid), caller);
	return p->enabled;
}

/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
	int i;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (nvmet_addr_family[i].type == adrfam)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_family[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (sysfs_streq(page, nvmet_addr_family[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;

found:
	port->disc_addr.adrfam = nvmet_addr_family[i].type;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);

static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	__le16 portid = to_nvmet_port(item)->disc_addr.portid;

	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}

static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);
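/*
 * Usage sketch (not authoritative; paths assume configfs is mounted at
 * the conventional /sys/kernel/config and a port "1" already exists):
 *
 *	echo ipv4 > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *	echo 1    > /sys/kernel/config/nvmet/ports/1/addr_portid
 *
 * Note that all addr_* stores fail with -EACCES while the port is
 * enabled, i.e. while at least one subsystem is linked into its
 * subsystems/ directory (see nvmet_port_subsys_allow_link() below).
 */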
static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}

static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);

static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};

static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
	u8 treq = to_nvmet_port(item)->disc_addr.treq &
		NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (treq == nvmet_addr_treq[i].type)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_treq[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);

static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}

static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);

static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);
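/*
 * Example (values are illustrative): a typical NVMe/TCP port writes its
 * listen address through these attributes:
 *
 *	echo tcp      > addr_trtype
 *	echo 10.0.0.1 > addr_traddr
 *	echo 4420     > addr_trsvcid
 *
 * 4420 is the IANA-assigned NVMe over Fabrics port; any free port works.
 * param_inline_data_size is transport specific; the default of -1 set in
 * nvmet_ports_make() lets the transport driver pick its own value.
 */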
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
}

static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->pi_enable = val;
	return count;
}

CONFIGFS_ATTR(nvmet_, param_pi_enable);
#endif

static ssize_t nvmet_addr_trtype_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (port->disc_addr.trtype == nvmet_transport[i].type)
			return snprintf(page, PAGE_SIZE,
					"%s\n", nvmet_transport[i].name);
	}

	return sprintf(page, "\n");
}

static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}

static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (sysfs_streq(page, nvmet_transport[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;

found:
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);

/*
 * Namespace structures & file operation functions below
 */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}

static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
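/*
 * Namespace setup sketch (assumes a subsystem "testnqn" exists; the
 * backing device path is illustrative):
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *	echo /dev/nvme0n1 > .../namespaces/1/device_path
 *	echo 1 > .../namespaces/1/enable
 *
 * device_path (like most other namespace attributes) returns -EBUSY
 * once the namespace is enabled.
 */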
#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */

static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);

static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);

static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
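/*
 * Note: unlike most namespace attributes, ana_grpid may be changed while
 * the namespace is enabled. The store above moves the namespace between
 * groups with both per-group counts updated under nvmet_ana_sem, bumps
 * the ANA change count and emits an ANA event to connected hosts.
 */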
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (strtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);

static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);

static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	if (!val)
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (!ns->enabled) {
		pr_err("enable ns before revalidate.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	if (nvmet_ns_revalidate(ns))
		nvmet_ns_changed(ns->subsys, ns->nsid);
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);

static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};

static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release	= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops	= &nvmet_ns_item_ops,
	.ct_attrs	= nvmet_ns_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x\n", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group	= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops	= &nvmet_namespaces_group_ops,
	.ct_owner	= THIS_MODULE,
};
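/*
 * Namespaces are created by mkdir(2) on the namespaces/ group with the
 * NSID as the directory name, e.g. "mkdir namespaces/1"; see
 * nvmet_ns_make() above. NSID 0 and 0xffffffff (NVME_NSID_ALL) are
 * reserved and rejected.
 */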
#ifdef CONFIG_NVME_TARGET_PASSTHRU

static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
}

static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);

static ssize_t nvmet_passthru_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
}

static ssize_t nvmet_passthru_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	bool enable;
	int ret = 0;

	if (strtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_passthru_ctrl_enable(subsys);
	else
		nvmet_passthru_ctrl_disable(subsys);

	return ret ? ret : count;
}
CONFIGFS_ATTR(nvmet_passthru_, enable);

static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
}

static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->admin_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);

static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
}

static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->io_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);

static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
}

static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int clear_ids;

	if (kstrtouint(page, 0, &clear_ids))
		return -EINVAL;
	subsys->clear_ids = clear_ids;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, clear_ids);

static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};
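/*
 * Passthru usage sketch (the controller device path is an example; it
 * must name an NVMe controller character device):
 *
 *	echo /dev/nvme0 > passthru/device_path
 *	echo 1          > passthru/enable
 *
 * device_path returns -EBUSY while the passthru controller is enabled.
 */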
static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs	= nvmet_passthru_attrs,
	.ct_owner	= THIS_MODULE,
};

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}

#else /* CONFIG_NVME_TARGET_PASSTHRU */

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}

#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link	= nvmet_port_subsys_allow_link,
	.drop_link	= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops	= &nvmet_port_subsys_item_ops,
	.ct_owner	= THIS_MODULE,
};
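/*
 * A subsystem is exported through a port with a symlink, e.g.:
 *
 *	ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *	      /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 *
 * The first link enables the port, removing the last link disables it
 * again (see the allow_link/drop_link handlers above).
 */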
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link	= nvmet_allowed_hosts_allow_link,
	.drop_link	= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops	= &nvmet_allowed_hosts_item_ops,
	.ct_owner	= THIS_MODULE,
};

static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
		to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (strtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
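/*
 * allow_any_host and explicit allowed_hosts/ links are mutually
 * exclusive. A host access list is built roughly like this (the hostnqn
 * is illustrative):
 *
 *	mkdir /sys/kernel/config/nvmet/hosts/hostnqn-1
 *	ln -s /sys/kernel/config/nvmet/hosts/hostnqn-1 \
 *	      /sys/kernel/config/nvmet/subsystems/testnqn/allowed_hosts/
 */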
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
				NVME_MAJOR(subsys->ver),
				NVME_MINOR(subsys->ver),
				NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver));
}

static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);

/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	return c >= 0x20 && c <= 0x7e;
}

static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%.*s\n",
			NVMET_SN_MAX_SIZE, subsys->serial);
}

static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");

	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number cannot be empty or exceed %d bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only printable ASCII characters\n");
			return -EINVAL;
		}
	}

	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);

static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
}

static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_min;

	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
		return -EINVAL;

	if (cntlid_min == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_min >= to_subsys(item)->cntlid_max)
		goto out_unlock;
	to_subsys(item)->cntlid_min = cntlid_min;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
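/*
 * cntlid_min and cntlid_max bound the controller IDs handed out for
 * this subsystem. Both stores run under nvmet_config_sem and preserve
 * the invariant cntlid_min < cntlid_max; a write that would violate it
 * fails with -EINVAL.
 */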
static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
}

static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_max;

	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
		return -EINVAL;

	if (cntlid_max == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_max <= to_subsys(item)->cntlid_min)
		goto out_unlock;
	to_subsys(item)->cntlid_max = cntlid_max;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);

static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
}

static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set model number. %s is already assigned\n",
		       subsys->model_number);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_MN_MAX_SIZE) {
		pr_err("Model number size cannot exceed %d bytes\n",
		       NVMET_MN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	/* Duplicate first so a rewrite doesn't leak the old model number. */
	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;
	kfree(subsys->model_number);
	subsys->model_number = val;
	return count;
}

static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);

#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (strtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
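/*
 * qid_max below caps the number of I/O queues a controller may create
 * on this subsystem; accepted values are 1..NVMET_NR_QUEUES.
 */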
static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
}

static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	to_subsys(item)->max_qid = qid_max;
	up_write(&nvmet_config_sem);
	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);

static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};

/*
 * Subsystem structures & folder operation functions below
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release	= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops	= &nvmet_subsys_item_ops,
	.ct_attrs	= nvmet_subsys_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group	= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops	= &nvmet_subsystems_group_ops,
	.ct_owner	= THIS_MODULE,
};

static ssize_t nvmet_referral_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (strtobool(page, &enable))
		goto inval;

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(parent, port);

	return count;
inval:
	pr_err("Invalid value '%s' for enable\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);
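/*
 * Referral usage sketch (the directory name is arbitrary): create a
 * referral under a port, fill in the usual addr_* attributes for the
 * referred-to discovery controller, then enable it:
 *
 *	mkdir ports/1/referrals/peer0
 *	(set addr_trtype, addr_traddr, addr_trsvcid, addr_portid, ...)
 *	echo 1 > ports/1/referrals/peer0/enable
 */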
/*
 * Discovery Service subsystem definitions
 */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};

static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};

static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};

static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};

static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
		char *page)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (state == nvmet_ana_state[i].type)
			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
	}

	return sprintf(page, "\n");
}

static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state *ana_state = grp->port->ana_state;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (sysfs_streq(page, nvmet_ana_state[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);

static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
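/*
 * ana_state accepts the strings from nvmet_ana_state[] above, e.g.
 * "echo inaccessible > ana_groups/2/ana_state". Every change bumps
 * nvmet_ana_chgcnt and triggers an ANA event on the port.
 */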
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release	= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops	= &nvmet_ana_group_item_ops,
	.ct_attrs	= nvmet_ana_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group	= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops	= &nvmet_ana_groups_group_ops,
	.ct_owner	= THIS_MODULE,
};

/*
 * Ports definitions.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}

static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
	.release	= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs	= nvmet_port_attrs,
	.ct_item_ops	= &nvmet_port_item_ops,
	.ct_owner	= THIS_MODULE,
};
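/*
 * "mkdir ports/<portid>" lands in nvmet_ports_make() below. Besides the
 * port itself it populates the default groups subsystems/, referrals/
 * and ana_groups/, including the fixed ANA group 1
 * (NVMET_DEFAULT_ANA_GRPID), which starts out optimized while all other
 * group IDs start inaccessible.
 */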
static struct config_group *nvmet_ports_make(struct config_group *group,
		const char *name)
{
	struct nvmet_port *port;
	u16 portid;
	u32 i;

	if (kstrtou16(name, 0, &portid))
		return ERR_PTR(-EINVAL);

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
			sizeof(*port->ana_state), GFP_KERNEL);
	if (!port->ana_state) {
		kfree(port);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
		if (i == NVMET_DEFAULT_ANA_GRPID)
			port->ana_state[i] = NVME_ANA_OPTIMIZED;
		else
			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
	}

	list_add(&port->global_entry, &nvmet_ports_list);

	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->subsystems);
	INIT_LIST_HEAD(&port->referrals);
	port->inline_data_size = -1;	/* < 0 == let the transport choose */

	port->disc_addr.portid = cpu_to_le16(portid);
	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
	config_group_init_type_name(&port->group, name, &nvmet_port_type);

	config_group_init_type_name(&port->subsys_group,
			"subsystems", &nvmet_port_subsys_type);
	configfs_add_default_group(&port->subsys_group, &port->group);

	config_group_init_type_name(&port->referrals_group,
			"referrals", &nvmet_referrals_type);
	configfs_add_default_group(&port->referrals_group, &port->group);

	config_group_init_type_name(&port->ana_groups_group,
			"ana_groups", &nvmet_ana_groups_type);
	configfs_add_default_group(&port->ana_groups_group, &port->group);

	port->ana_default_group.port = port;
	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
	config_group_init_type_name(&port->ana_default_group.group,
			__stringify(NVMET_DEFAULT_ANA_GRPID),
			&nvmet_ana_group_type);
	configfs_add_default_group(&port->ana_default_group.group,
			&port->ana_groups_group);

	return &port->group;
}

static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group	= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops	= &nvmet_ports_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;

#ifdef CONFIG_NVME_TARGET_AUTH
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret = to_host(item)->dhchap_secret;

	if (!dhchap_secret)
		return sprintf(page, "\n");
	return sprintf(page, "%s\n", dhchap_secret);
}

static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, false);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_key);
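/*
 * dhchap_key holds the host's DH-HMAC-CHAP secret in its NVMe string
 * representation (a "DHHC-1:..." string); parsing and validation are
 * done by nvmet_auth_set_key(). The controller key below works the same
 * way, with the ctrl flag set instead.
 */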
static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;

	if (!dhchap_secret)
		return sprintf(page, "\n");
	return sprintf(page, "%s\n", dhchap_secret);
}

static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, true);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);

static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);

	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
}

static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -ENOTSUPP;
	host->dhchap_hash_id = hmac_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_hash);

static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);

	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
}

static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int dhgroup_id;

	dhgroup_id = nvme_auth_dhgroup_id(page);
	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
		return -EINVAL;
	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);

		if (!crypto_has_kpp(kpp, 0, 0))
			return -EINVAL;
	}
	host->dhchap_dhgroup_id = dhgroup_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);

static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */

static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release	= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops	= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs	= nvmet_host_attrs,
#endif
	.ct_owner	= THIS_MODULE,
};

static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group	= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops	= &nvmet_hosts_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

static const struct config_item_type nvmet_root_type = {
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
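/*
 * The resulting configfs layout, as registered in nvmet_init_configfs()
 * below (assuming the conventional /sys/kernel/config mount point):
 *
 *	/sys/kernel/config/nvmet/
 *		subsystems/
 *		ports/
 *		hosts/
 */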
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}

void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}