1 /* 2 * Configfs interface for the NVMe target. 3 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/stat.h> 19 #include <linux/ctype.h> 20 #include <linux/pci.h> 21 #include <linux/pci-p2pdma.h> 22 23 #include "nvmet.h" 24 25 static const struct config_item_type nvmet_host_type; 26 static const struct config_item_type nvmet_subsys_type; 27 28 static const struct nvmet_transport_name { 29 u8 type; 30 const char *name; 31 } nvmet_transport_names[] = { 32 { NVMF_TRTYPE_RDMA, "rdma" }, 33 { NVMF_TRTYPE_FC, "fc" }, 34 { NVMF_TRTYPE_LOOP, "loop" }, 35 }; 36 37 /* 38 * nvmet_port Generic ConfigFS definitions. 39 * Used in any place in the ConfigFS tree that refers to an address. 40 */ 41 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, 42 char *page) 43 { 44 switch (to_nvmet_port(item)->disc_addr.adrfam) { 45 case NVMF_ADDR_FAMILY_IP4: 46 return sprintf(page, "ipv4\n"); 47 case NVMF_ADDR_FAMILY_IP6: 48 return sprintf(page, "ipv6\n"); 49 case NVMF_ADDR_FAMILY_IB: 50 return sprintf(page, "ib\n"); 51 case NVMF_ADDR_FAMILY_FC: 52 return sprintf(page, "fc\n"); 53 default: 54 return sprintf(page, "\n"); 55 } 56 } 57 58 static ssize_t nvmet_addr_adrfam_store(struct config_item *item, 59 const char *page, size_t count) 60 { 61 struct nvmet_port *port = to_nvmet_port(item); 62 63 if (port->enabled) { 64 pr_err("Cannot modify address while enabled\n"); 65 pr_err("Disable the address before modifying\n"); 66 return -EACCES; 67 } 68 69 if (sysfs_streq(page, "ipv4")) { 70 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4; 71 } else if (sysfs_streq(page, "ipv6")) { 72 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6; 73 } else if (sysfs_streq(page, "ib")) { 74 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB; 75 } else if (sysfs_streq(page, "fc")) { 76 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC; 77 } else { 78 pr_err("Invalid value '%s' for adrfam\n", page); 79 return -EINVAL; 80 } 81 82 return count; 83 } 84 85 CONFIGFS_ATTR(nvmet_, addr_adrfam); 86 87 static ssize_t nvmet_addr_portid_show(struct config_item *item, 88 char *page) 89 { 90 struct nvmet_port *port = to_nvmet_port(item); 91 92 return snprintf(page, PAGE_SIZE, "%d\n", 93 le16_to_cpu(port->disc_addr.portid)); 94 } 95 96 static ssize_t nvmet_addr_portid_store(struct config_item *item, 97 const char *page, size_t count) 98 { 99 struct nvmet_port *port = to_nvmet_port(item); 100 u16 portid = 0; 101 102 if (kstrtou16(page, 0, &portid)) { 103 pr_err("Invalid value '%s' for portid\n", page); 104 return -EINVAL; 105 } 106 107 if (port->enabled) { 108 pr_err("Cannot modify address while enabled\n"); 109 pr_err("Disable the address before modifying\n"); 110 return -EACCES; 111 } 112 port->disc_addr.portid = cpu_to_le16(portid); 113 return count; 114 } 115 116 CONFIGFS_ATTR(nvmet_, addr_portid); 117 118 static ssize_t nvmet_addr_traddr_show(struct config_item *item, 119 char *page) 120 { 121 struct nvmet_port *port = to_nvmet_port(item); 122 123 return snprintf(page, PAGE_SIZE, "%s\n", 124 port->disc_addr.traddr); 125 } 126 127 static ssize_t nvmet_addr_traddr_store(struct config_item *item, 128 const char *page, size_t count) 129 { 130 struct nvmet_port *port = to_nvmet_port(item); 131 132 if (count > NVMF_TRADDR_SIZE) { 133 pr_err("Invalid value '%s' for traddr\n", page); 134 return -EINVAL; 135 } 136 137 if (port->enabled) { 138 pr_err("Cannot modify address while enabled\n"); 139 pr_err("Disable the address before modifying\n"); 140 return -EACCES; 141 } 142 143 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1) 144 return -EINVAL; 145 return count; 146 } 147 148 CONFIGFS_ATTR(nvmet_, addr_traddr); 149 150 static ssize_t nvmet_addr_treq_show(struct config_item *item, 151 char *page) 152 { 153 switch (to_nvmet_port(item)->disc_addr.treq) { 154 case NVMF_TREQ_NOT_SPECIFIED: 155 return sprintf(page, "not specified\n"); 156 case NVMF_TREQ_REQUIRED: 157 return sprintf(page, "required\n"); 158 case NVMF_TREQ_NOT_REQUIRED: 159 return sprintf(page, "not required\n"); 160 default: 161 return sprintf(page, "\n"); 162 } 163 } 164 165 static ssize_t nvmet_addr_treq_store(struct config_item *item, 166 const char *page, size_t count) 167 { 168 struct nvmet_port *port = to_nvmet_port(item); 169 170 if (port->enabled) { 171 pr_err("Cannot modify address while enabled\n"); 172 pr_err("Disable the address before modifying\n"); 173 return -EACCES; 174 } 175 176 if (sysfs_streq(page, "not specified")) { 177 port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED; 178 } else if (sysfs_streq(page, "required")) { 179 port->disc_addr.treq = NVMF_TREQ_REQUIRED; 180 } else if (sysfs_streq(page, "not required")) { 181 port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED; 182 } else { 183 pr_err("Invalid value '%s' for treq\n", page); 184 return -EINVAL; 185 } 186 187 return count; 188 } 189 190 CONFIGFS_ATTR(nvmet_, addr_treq); 191 192 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item, 193 char *page) 194 { 195 struct nvmet_port *port = to_nvmet_port(item); 196 197 return snprintf(page, PAGE_SIZE, "%s\n", 198 port->disc_addr.trsvcid); 199 } 200 201 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, 202 const char *page, size_t count) 203 { 204 struct nvmet_port *port = to_nvmet_port(item); 205 206 if (count > NVMF_TRSVCID_SIZE) { 207 pr_err("Invalid value '%s' for trsvcid\n", page); 208 return -EINVAL; 209 } 210 if (port->enabled) { 211 pr_err("Cannot modify address while enabled\n"); 212 pr_err("Disable the address before modifying\n"); 213 return -EACCES; 214 } 215 216 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1) 217 return -EINVAL; 218 return count; 219 } 220 221 CONFIGFS_ATTR(nvmet_, addr_trsvcid); 222 223 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, 224 char *page) 225 { 226 struct nvmet_port *port = to_nvmet_port(item); 227 228 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); 229 } 230 231 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, 232 const char *page, size_t count) 233 { 234 struct nvmet_port *port = to_nvmet_port(item); 235 int ret; 236 237 if (port->enabled) { 238 pr_err("Cannot modify inline_data_size while port enabled\n"); 239 pr_err("Disable the port before modifying\n"); 240 return -EACCES; 241 } 242 ret = kstrtoint(page, 0, &port->inline_data_size); 243 if (ret) { 244 pr_err("Invalid value '%s' for inline_data_size\n", page); 245 return -EINVAL; 246 } 247 return count; 248 } 249 250 CONFIGFS_ATTR(nvmet_, param_inline_data_size); 251 252 static ssize_t nvmet_addr_trtype_show(struct config_item *item, 253 char *page) 254 { 255 struct nvmet_port *port = to_nvmet_port(item); 256 int i; 257 258 for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) { 259 if (port->disc_addr.trtype != nvmet_transport_names[i].type) 260 continue; 261 return sprintf(page, "%s\n", nvmet_transport_names[i].name); 262 } 263 264 return sprintf(page, "\n"); 265 } 266 267 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) 268 { 269 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; 270 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED; 271 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; 272 } 273 274 static ssize_t nvmet_addr_trtype_store(struct config_item *item, 275 const char *page, size_t count) 276 { 277 struct nvmet_port *port = to_nvmet_port(item); 278 int i; 279 280 if (port->enabled) { 281 pr_err("Cannot modify address while enabled\n"); 282 pr_err("Disable the address before modifying\n"); 283 return -EACCES; 284 } 285 286 for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) { 287 if (sysfs_streq(page, nvmet_transport_names[i].name)) 288 goto found; 289 } 290 291 pr_err("Invalid value '%s' for trtype\n", page); 292 return -EINVAL; 293 found: 294 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); 295 port->disc_addr.trtype = nvmet_transport_names[i].type; 296 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) 297 nvmet_port_init_tsas_rdma(port); 298 return count; 299 } 300 301 CONFIGFS_ATTR(nvmet_, addr_trtype); 302 303 /* 304 * Namespace structures & file operation functions below 305 */ 306 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page) 307 { 308 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path); 309 } 310 311 static ssize_t nvmet_ns_device_path_store(struct config_item *item, 312 const char *page, size_t count) 313 { 314 struct nvmet_ns *ns = to_nvmet_ns(item); 315 struct nvmet_subsys *subsys = ns->subsys; 316 size_t len; 317 int ret; 318 319 mutex_lock(&subsys->lock); 320 ret = -EBUSY; 321 if (ns->enabled) 322 goto out_unlock; 323 324 ret = -EINVAL; 325 len = strcspn(page, "\n"); 326 if (!len) 327 goto out_unlock; 328 329 kfree(ns->device_path); 330 ret = -ENOMEM; 331 ns->device_path = kstrndup(page, len, GFP_KERNEL); 332 if (!ns->device_path) 333 goto out_unlock; 334 335 mutex_unlock(&subsys->lock); 336 return count; 337 338 out_unlock: 339 mutex_unlock(&subsys->lock); 340 return ret; 341 } 342 343 CONFIGFS_ATTR(nvmet_ns_, device_path); 344 345 #ifdef CONFIG_PCI_P2PDMA 346 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page) 347 { 348 struct nvmet_ns *ns = to_nvmet_ns(item); 349 350 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem); 351 } 352 353 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item, 354 const char *page, size_t count) 355 { 356 struct nvmet_ns *ns = to_nvmet_ns(item); 357 struct pci_dev *p2p_dev = NULL; 358 bool use_p2pmem; 359 int ret = count; 360 int error; 361 362 mutex_lock(&ns->subsys->lock); 363 if (ns->enabled) { 364 ret = -EBUSY; 365 goto out_unlock; 366 } 367 368 error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem); 369 if (error) { 370 ret = error; 371 goto out_unlock; 372 } 373 374 ns->use_p2pmem = use_p2pmem; 375 pci_dev_put(ns->p2p_dev); 376 ns->p2p_dev = p2p_dev; 377 378 out_unlock: 379 mutex_unlock(&ns->subsys->lock); 380 381 return ret; 382 } 383 384 CONFIGFS_ATTR(nvmet_ns_, p2pmem); 385 #endif /* CONFIG_PCI_P2PDMA */ 386 387 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page) 388 { 389 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid); 390 } 391 392 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item, 393 const char *page, size_t count) 394 { 395 struct nvmet_ns *ns = to_nvmet_ns(item); 396 struct nvmet_subsys *subsys = ns->subsys; 397 int ret = 0; 398 399 400 mutex_lock(&subsys->lock); 401 if (ns->enabled) { 402 ret = -EBUSY; 403 goto out_unlock; 404 } 405 406 407 if (uuid_parse(page, &ns->uuid)) 408 ret = -EINVAL; 409 410 out_unlock: 411 mutex_unlock(&subsys->lock); 412 return ret ? ret : count; 413 } 414 415 CONFIGFS_ATTR(nvmet_ns_, device_uuid); 416 417 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) 418 { 419 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); 420 } 421 422 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, 423 const char *page, size_t count) 424 { 425 struct nvmet_ns *ns = to_nvmet_ns(item); 426 struct nvmet_subsys *subsys = ns->subsys; 427 u8 nguid[16]; 428 const char *p = page; 429 int i; 430 int ret = 0; 431 432 mutex_lock(&subsys->lock); 433 if (ns->enabled) { 434 ret = -EBUSY; 435 goto out_unlock; 436 } 437 438 for (i = 0; i < 16; i++) { 439 if (p + 2 > page + count) { 440 ret = -EINVAL; 441 goto out_unlock; 442 } 443 if (!isxdigit(p[0]) || !isxdigit(p[1])) { 444 ret = -EINVAL; 445 goto out_unlock; 446 } 447 448 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]); 449 p += 2; 450 451 if (*p == '-' || *p == ':') 452 p++; 453 } 454 455 memcpy(&ns->nguid, nguid, sizeof(nguid)); 456 out_unlock: 457 mutex_unlock(&subsys->lock); 458 return ret ? ret : count; 459 } 460 461 CONFIGFS_ATTR(nvmet_ns_, device_nguid); 462 463 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) 464 { 465 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); 466 } 467 468 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, 469 const char *page, size_t count) 470 { 471 struct nvmet_ns *ns = to_nvmet_ns(item); 472 u32 oldgrpid, newgrpid; 473 int ret; 474 475 ret = kstrtou32(page, 0, &newgrpid); 476 if (ret) 477 return ret; 478 479 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) 480 return -EINVAL; 481 482 down_write(&nvmet_ana_sem); 483 oldgrpid = ns->anagrpid; 484 nvmet_ana_group_enabled[newgrpid]++; 485 ns->anagrpid = newgrpid; 486 nvmet_ana_group_enabled[oldgrpid]--; 487 nvmet_ana_chgcnt++; 488 up_write(&nvmet_ana_sem); 489 490 nvmet_send_ana_event(ns->subsys, NULL); 491 return count; 492 } 493 494 CONFIGFS_ATTR(nvmet_ns_, ana_grpid); 495 496 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) 497 { 498 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); 499 } 500 501 static ssize_t nvmet_ns_enable_store(struct config_item *item, 502 const char *page, size_t count) 503 { 504 struct nvmet_ns *ns = to_nvmet_ns(item); 505 bool enable; 506 int ret = 0; 507 508 if (strtobool(page, &enable)) 509 return -EINVAL; 510 511 if (enable) 512 ret = nvmet_ns_enable(ns); 513 else 514 nvmet_ns_disable(ns); 515 516 return ret ? ret : count; 517 } 518 519 CONFIGFS_ATTR(nvmet_ns_, enable); 520 521 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) 522 { 523 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); 524 } 525 526 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, 527 const char *page, size_t count) 528 { 529 struct nvmet_ns *ns = to_nvmet_ns(item); 530 bool val; 531 532 if (strtobool(page, &val)) 533 return -EINVAL; 534 535 mutex_lock(&ns->subsys->lock); 536 if (ns->enabled) { 537 pr_err("disable ns before setting buffered_io value.\n"); 538 mutex_unlock(&ns->subsys->lock); 539 return -EINVAL; 540 } 541 542 ns->buffered_io = val; 543 mutex_unlock(&ns->subsys->lock); 544 return count; 545 } 546 547 CONFIGFS_ATTR(nvmet_ns_, buffered_io); 548 549 static struct configfs_attribute *nvmet_ns_attrs[] = { 550 &nvmet_ns_attr_device_path, 551 &nvmet_ns_attr_device_nguid, 552 &nvmet_ns_attr_device_uuid, 553 &nvmet_ns_attr_ana_grpid, 554 &nvmet_ns_attr_enable, 555 &nvmet_ns_attr_buffered_io, 556 #ifdef CONFIG_PCI_P2PDMA 557 &nvmet_ns_attr_p2pmem, 558 #endif 559 NULL, 560 }; 561 562 static void nvmet_ns_release(struct config_item *item) 563 { 564 struct nvmet_ns *ns = to_nvmet_ns(item); 565 566 nvmet_ns_free(ns); 567 } 568 569 static struct configfs_item_operations nvmet_ns_item_ops = { 570 .release = nvmet_ns_release, 571 }; 572 573 static const struct config_item_type nvmet_ns_type = { 574 .ct_item_ops = &nvmet_ns_item_ops, 575 .ct_attrs = nvmet_ns_attrs, 576 .ct_owner = THIS_MODULE, 577 }; 578 579 static struct config_group *nvmet_ns_make(struct config_group *group, 580 const char *name) 581 { 582 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item); 583 struct nvmet_ns *ns; 584 int ret; 585 u32 nsid; 586 587 ret = kstrtou32(name, 0, &nsid); 588 if (ret) 589 goto out; 590 591 ret = -EINVAL; 592 if (nsid == 0 || nsid == NVME_NSID_ALL) 593 goto out; 594 595 ret = -ENOMEM; 596 ns = nvmet_ns_alloc(subsys, nsid); 597 if (!ns) 598 goto out; 599 config_group_init_type_name(&ns->group, name, &nvmet_ns_type); 600 601 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn); 602 603 return &ns->group; 604 out: 605 return ERR_PTR(ret); 606 } 607 608 static struct configfs_group_operations nvmet_namespaces_group_ops = { 609 .make_group = nvmet_ns_make, 610 }; 611 612 static const struct config_item_type nvmet_namespaces_type = { 613 .ct_group_ops = &nvmet_namespaces_group_ops, 614 .ct_owner = THIS_MODULE, 615 }; 616 617 static int nvmet_port_subsys_allow_link(struct config_item *parent, 618 struct config_item *target) 619 { 620 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 621 struct nvmet_subsys *subsys; 622 struct nvmet_subsys_link *link, *p; 623 int ret; 624 625 if (target->ci_type != &nvmet_subsys_type) { 626 pr_err("can only link subsystems into the subsystems dir.!\n"); 627 return -EINVAL; 628 } 629 subsys = to_subsys(target); 630 link = kmalloc(sizeof(*link), GFP_KERNEL); 631 if (!link) 632 return -ENOMEM; 633 link->subsys = subsys; 634 635 down_write(&nvmet_config_sem); 636 ret = -EEXIST; 637 list_for_each_entry(p, &port->subsystems, entry) { 638 if (p->subsys == subsys) 639 goto out_free_link; 640 } 641 642 if (list_empty(&port->subsystems)) { 643 ret = nvmet_enable_port(port); 644 if (ret) 645 goto out_free_link; 646 } 647 648 list_add_tail(&link->entry, &port->subsystems); 649 nvmet_genctr++; 650 up_write(&nvmet_config_sem); 651 return 0; 652 653 out_free_link: 654 up_write(&nvmet_config_sem); 655 kfree(link); 656 return ret; 657 } 658 659 static void nvmet_port_subsys_drop_link(struct config_item *parent, 660 struct config_item *target) 661 { 662 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 663 struct nvmet_subsys *subsys = to_subsys(target); 664 struct nvmet_subsys_link *p; 665 666 down_write(&nvmet_config_sem); 667 list_for_each_entry(p, &port->subsystems, entry) { 668 if (p->subsys == subsys) 669 goto found; 670 } 671 up_write(&nvmet_config_sem); 672 return; 673 674 found: 675 list_del(&p->entry); 676 nvmet_genctr++; 677 if (list_empty(&port->subsystems)) 678 nvmet_disable_port(port); 679 up_write(&nvmet_config_sem); 680 kfree(p); 681 } 682 683 static struct configfs_item_operations nvmet_port_subsys_item_ops = { 684 .allow_link = nvmet_port_subsys_allow_link, 685 .drop_link = nvmet_port_subsys_drop_link, 686 }; 687 688 static const struct config_item_type nvmet_port_subsys_type = { 689 .ct_item_ops = &nvmet_port_subsys_item_ops, 690 .ct_owner = THIS_MODULE, 691 }; 692 693 static int nvmet_allowed_hosts_allow_link(struct config_item *parent, 694 struct config_item *target) 695 { 696 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 697 struct nvmet_host *host; 698 struct nvmet_host_link *link, *p; 699 int ret; 700 701 if (target->ci_type != &nvmet_host_type) { 702 pr_err("can only link hosts into the allowed_hosts directory!\n"); 703 return -EINVAL; 704 } 705 706 host = to_host(target); 707 link = kmalloc(sizeof(*link), GFP_KERNEL); 708 if (!link) 709 return -ENOMEM; 710 link->host = host; 711 712 down_write(&nvmet_config_sem); 713 ret = -EINVAL; 714 if (subsys->allow_any_host) { 715 pr_err("can't add hosts when allow_any_host is set!\n"); 716 goto out_free_link; 717 } 718 719 ret = -EEXIST; 720 list_for_each_entry(p, &subsys->hosts, entry) { 721 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 722 goto out_free_link; 723 } 724 list_add_tail(&link->entry, &subsys->hosts); 725 nvmet_genctr++; 726 up_write(&nvmet_config_sem); 727 return 0; 728 out_free_link: 729 up_write(&nvmet_config_sem); 730 kfree(link); 731 return ret; 732 } 733 734 static void nvmet_allowed_hosts_drop_link(struct config_item *parent, 735 struct config_item *target) 736 { 737 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 738 struct nvmet_host *host = to_host(target); 739 struct nvmet_host_link *p; 740 741 down_write(&nvmet_config_sem); 742 list_for_each_entry(p, &subsys->hosts, entry) { 743 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 744 goto found; 745 } 746 up_write(&nvmet_config_sem); 747 return; 748 749 found: 750 list_del(&p->entry); 751 nvmet_genctr++; 752 up_write(&nvmet_config_sem); 753 kfree(p); 754 } 755 756 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { 757 .allow_link = nvmet_allowed_hosts_allow_link, 758 .drop_link = nvmet_allowed_hosts_drop_link, 759 }; 760 761 static const struct config_item_type nvmet_allowed_hosts_type = { 762 .ct_item_ops = &nvmet_allowed_hosts_item_ops, 763 .ct_owner = THIS_MODULE, 764 }; 765 766 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item, 767 char *page) 768 { 769 return snprintf(page, PAGE_SIZE, "%d\n", 770 to_subsys(item)->allow_any_host); 771 } 772 773 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item, 774 const char *page, size_t count) 775 { 776 struct nvmet_subsys *subsys = to_subsys(item); 777 bool allow_any_host; 778 int ret = 0; 779 780 if (strtobool(page, &allow_any_host)) 781 return -EINVAL; 782 783 down_write(&nvmet_config_sem); 784 if (allow_any_host && !list_empty(&subsys->hosts)) { 785 pr_err("Can't set allow_any_host when explicit hosts are set!\n"); 786 ret = -EINVAL; 787 goto out_unlock; 788 } 789 790 subsys->allow_any_host = allow_any_host; 791 out_unlock: 792 up_write(&nvmet_config_sem); 793 return ret ? ret : count; 794 } 795 796 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); 797 798 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, 799 char *page) 800 { 801 struct nvmet_subsys *subsys = to_subsys(item); 802 803 if (NVME_TERTIARY(subsys->ver)) 804 return snprintf(page, PAGE_SIZE, "%d.%d.%d\n", 805 (int)NVME_MAJOR(subsys->ver), 806 (int)NVME_MINOR(subsys->ver), 807 (int)NVME_TERTIARY(subsys->ver)); 808 else 809 return snprintf(page, PAGE_SIZE, "%d.%d\n", 810 (int)NVME_MAJOR(subsys->ver), 811 (int)NVME_MINOR(subsys->ver)); 812 } 813 814 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, 815 const char *page, size_t count) 816 { 817 struct nvmet_subsys *subsys = to_subsys(item); 818 int major, minor, tertiary = 0; 819 int ret; 820 821 822 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); 823 if (ret != 2 && ret != 3) 824 return -EINVAL; 825 826 down_write(&nvmet_config_sem); 827 subsys->ver = NVME_VS(major, minor, tertiary); 828 up_write(&nvmet_config_sem); 829 830 return count; 831 } 832 CONFIGFS_ATTR(nvmet_subsys_, attr_version); 833 834 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, 835 char *page) 836 { 837 struct nvmet_subsys *subsys = to_subsys(item); 838 839 return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial); 840 } 841 842 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, 843 const char *page, size_t count) 844 { 845 struct nvmet_subsys *subsys = to_subsys(item); 846 847 down_write(&nvmet_config_sem); 848 sscanf(page, "%llx\n", &subsys->serial); 849 up_write(&nvmet_config_sem); 850 851 return count; 852 } 853 CONFIGFS_ATTR(nvmet_subsys_, attr_serial); 854 855 static struct configfs_attribute *nvmet_subsys_attrs[] = { 856 &nvmet_subsys_attr_attr_allow_any_host, 857 &nvmet_subsys_attr_attr_version, 858 &nvmet_subsys_attr_attr_serial, 859 NULL, 860 }; 861 862 /* 863 * Subsystem structures & folder operation functions below 864 */ 865 static void nvmet_subsys_release(struct config_item *item) 866 { 867 struct nvmet_subsys *subsys = to_subsys(item); 868 869 nvmet_subsys_del_ctrls(subsys); 870 nvmet_subsys_put(subsys); 871 } 872 873 static struct configfs_item_operations nvmet_subsys_item_ops = { 874 .release = nvmet_subsys_release, 875 }; 876 877 static const struct config_item_type nvmet_subsys_type = { 878 .ct_item_ops = &nvmet_subsys_item_ops, 879 .ct_attrs = nvmet_subsys_attrs, 880 .ct_owner = THIS_MODULE, 881 }; 882 883 static struct config_group *nvmet_subsys_make(struct config_group *group, 884 const char *name) 885 { 886 struct nvmet_subsys *subsys; 887 888 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) { 889 pr_err("can't create discovery subsystem through configfs\n"); 890 return ERR_PTR(-EINVAL); 891 } 892 893 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); 894 if (!subsys) 895 return ERR_PTR(-ENOMEM); 896 897 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type); 898 899 config_group_init_type_name(&subsys->namespaces_group, 900 "namespaces", &nvmet_namespaces_type); 901 configfs_add_default_group(&subsys->namespaces_group, &subsys->group); 902 903 config_group_init_type_name(&subsys->allowed_hosts_group, 904 "allowed_hosts", &nvmet_allowed_hosts_type); 905 configfs_add_default_group(&subsys->allowed_hosts_group, 906 &subsys->group); 907 908 return &subsys->group; 909 } 910 911 static struct configfs_group_operations nvmet_subsystems_group_ops = { 912 .make_group = nvmet_subsys_make, 913 }; 914 915 static const struct config_item_type nvmet_subsystems_type = { 916 .ct_group_ops = &nvmet_subsystems_group_ops, 917 .ct_owner = THIS_MODULE, 918 }; 919 920 static ssize_t nvmet_referral_enable_show(struct config_item *item, 921 char *page) 922 { 923 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled); 924 } 925 926 static ssize_t nvmet_referral_enable_store(struct config_item *item, 927 const char *page, size_t count) 928 { 929 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 930 struct nvmet_port *port = to_nvmet_port(item); 931 bool enable; 932 933 if (strtobool(page, &enable)) 934 goto inval; 935 936 if (enable) 937 nvmet_referral_enable(parent, port); 938 else 939 nvmet_referral_disable(port); 940 941 return count; 942 inval: 943 pr_err("Invalid value '%s' for enable\n", page); 944 return -EINVAL; 945 } 946 947 CONFIGFS_ATTR(nvmet_referral_, enable); 948 949 /* 950 * Discovery Service subsystem definitions 951 */ 952 static struct configfs_attribute *nvmet_referral_attrs[] = { 953 &nvmet_attr_addr_adrfam, 954 &nvmet_attr_addr_portid, 955 &nvmet_attr_addr_treq, 956 &nvmet_attr_addr_traddr, 957 &nvmet_attr_addr_trsvcid, 958 &nvmet_attr_addr_trtype, 959 &nvmet_referral_attr_enable, 960 NULL, 961 }; 962 963 static void nvmet_referral_release(struct config_item *item) 964 { 965 struct nvmet_port *port = to_nvmet_port(item); 966 967 nvmet_referral_disable(port); 968 kfree(port); 969 } 970 971 static struct configfs_item_operations nvmet_referral_item_ops = { 972 .release = nvmet_referral_release, 973 }; 974 975 static const struct config_item_type nvmet_referral_type = { 976 .ct_owner = THIS_MODULE, 977 .ct_attrs = nvmet_referral_attrs, 978 .ct_item_ops = &nvmet_referral_item_ops, 979 }; 980 981 static struct config_group *nvmet_referral_make( 982 struct config_group *group, const char *name) 983 { 984 struct nvmet_port *port; 985 986 port = kzalloc(sizeof(*port), GFP_KERNEL); 987 if (!port) 988 return ERR_PTR(-ENOMEM); 989 990 INIT_LIST_HEAD(&port->entry); 991 config_group_init_type_name(&port->group, name, &nvmet_referral_type); 992 993 return &port->group; 994 } 995 996 static struct configfs_group_operations nvmet_referral_group_ops = { 997 .make_group = nvmet_referral_make, 998 }; 999 1000 static const struct config_item_type nvmet_referrals_type = { 1001 .ct_owner = THIS_MODULE, 1002 .ct_group_ops = &nvmet_referral_group_ops, 1003 }; 1004 1005 static struct { 1006 enum nvme_ana_state state; 1007 const char *name; 1008 } nvmet_ana_state_names[] = { 1009 { NVME_ANA_OPTIMIZED, "optimized" }, 1010 { NVME_ANA_NONOPTIMIZED, "non-optimized" }, 1011 { NVME_ANA_INACCESSIBLE, "inaccessible" }, 1012 { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, 1013 { NVME_ANA_CHANGE, "change" }, 1014 }; 1015 1016 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, 1017 char *page) 1018 { 1019 struct nvmet_ana_group *grp = to_ana_group(item); 1020 enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; 1021 int i; 1022 1023 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) { 1024 if (state != nvmet_ana_state_names[i].state) 1025 continue; 1026 return sprintf(page, "%s\n", nvmet_ana_state_names[i].name); 1027 } 1028 1029 return sprintf(page, "\n"); 1030 } 1031 1032 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, 1033 const char *page, size_t count) 1034 { 1035 struct nvmet_ana_group *grp = to_ana_group(item); 1036 int i; 1037 1038 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) { 1039 if (sysfs_streq(page, nvmet_ana_state_names[i].name)) 1040 goto found; 1041 } 1042 1043 pr_err("Invalid value '%s' for ana_state\n", page); 1044 return -EINVAL; 1045 1046 found: 1047 down_write(&nvmet_ana_sem); 1048 grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state; 1049 nvmet_ana_chgcnt++; 1050 up_write(&nvmet_ana_sem); 1051 1052 nvmet_port_send_ana_event(grp->port); 1053 return count; 1054 } 1055 1056 CONFIGFS_ATTR(nvmet_ana_group_, ana_state); 1057 1058 static struct configfs_attribute *nvmet_ana_group_attrs[] = { 1059 &nvmet_ana_group_attr_ana_state, 1060 NULL, 1061 }; 1062 1063 static void nvmet_ana_group_release(struct config_item *item) 1064 { 1065 struct nvmet_ana_group *grp = to_ana_group(item); 1066 1067 if (grp == &grp->port->ana_default_group) 1068 return; 1069 1070 down_write(&nvmet_ana_sem); 1071 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; 1072 nvmet_ana_group_enabled[grp->grpid]--; 1073 up_write(&nvmet_ana_sem); 1074 1075 nvmet_port_send_ana_event(grp->port); 1076 kfree(grp); 1077 } 1078 1079 static struct configfs_item_operations nvmet_ana_group_item_ops = { 1080 .release = nvmet_ana_group_release, 1081 }; 1082 1083 static const struct config_item_type nvmet_ana_group_type = { 1084 .ct_item_ops = &nvmet_ana_group_item_ops, 1085 .ct_attrs = nvmet_ana_group_attrs, 1086 .ct_owner = THIS_MODULE, 1087 }; 1088 1089 static struct config_group *nvmet_ana_groups_make_group( 1090 struct config_group *group, const char *name) 1091 { 1092 struct nvmet_port *port = ana_groups_to_port(&group->cg_item); 1093 struct nvmet_ana_group *grp; 1094 u32 grpid; 1095 int ret; 1096 1097 ret = kstrtou32(name, 0, &grpid); 1098 if (ret) 1099 goto out; 1100 1101 ret = -EINVAL; 1102 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) 1103 goto out; 1104 1105 ret = -ENOMEM; 1106 grp = kzalloc(sizeof(*grp), GFP_KERNEL); 1107 if (!grp) 1108 goto out; 1109 grp->port = port; 1110 grp->grpid = grpid; 1111 1112 down_write(&nvmet_ana_sem); 1113 nvmet_ana_group_enabled[grpid]++; 1114 up_write(&nvmet_ana_sem); 1115 1116 nvmet_port_send_ana_event(grp->port); 1117 1118 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); 1119 return &grp->group; 1120 out: 1121 return ERR_PTR(ret); 1122 } 1123 1124 static struct configfs_group_operations nvmet_ana_groups_group_ops = { 1125 .make_group = nvmet_ana_groups_make_group, 1126 }; 1127 1128 static const struct config_item_type nvmet_ana_groups_type = { 1129 .ct_group_ops = &nvmet_ana_groups_group_ops, 1130 .ct_owner = THIS_MODULE, 1131 }; 1132 1133 /* 1134 * Ports definitions. 1135 */ 1136 static void nvmet_port_release(struct config_item *item) 1137 { 1138 struct nvmet_port *port = to_nvmet_port(item); 1139 1140 kfree(port->ana_state); 1141 kfree(port); 1142 } 1143 1144 static struct configfs_attribute *nvmet_port_attrs[] = { 1145 &nvmet_attr_addr_adrfam, 1146 &nvmet_attr_addr_treq, 1147 &nvmet_attr_addr_traddr, 1148 &nvmet_attr_addr_trsvcid, 1149 &nvmet_attr_addr_trtype, 1150 &nvmet_attr_param_inline_data_size, 1151 NULL, 1152 }; 1153 1154 static struct configfs_item_operations nvmet_port_item_ops = { 1155 .release = nvmet_port_release, 1156 }; 1157 1158 static const struct config_item_type nvmet_port_type = { 1159 .ct_attrs = nvmet_port_attrs, 1160 .ct_item_ops = &nvmet_port_item_ops, 1161 .ct_owner = THIS_MODULE, 1162 }; 1163 1164 static struct config_group *nvmet_ports_make(struct config_group *group, 1165 const char *name) 1166 { 1167 struct nvmet_port *port; 1168 u16 portid; 1169 u32 i; 1170 1171 if (kstrtou16(name, 0, &portid)) 1172 return ERR_PTR(-EINVAL); 1173 1174 port = kzalloc(sizeof(*port), GFP_KERNEL); 1175 if (!port) 1176 return ERR_PTR(-ENOMEM); 1177 1178 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, 1179 sizeof(*port->ana_state), GFP_KERNEL); 1180 if (!port->ana_state) { 1181 kfree(port); 1182 return ERR_PTR(-ENOMEM); 1183 } 1184 1185 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { 1186 if (i == NVMET_DEFAULT_ANA_GRPID) 1187 port->ana_state[1] = NVME_ANA_OPTIMIZED; 1188 else 1189 port->ana_state[i] = NVME_ANA_INACCESSIBLE; 1190 } 1191 1192 INIT_LIST_HEAD(&port->entry); 1193 INIT_LIST_HEAD(&port->subsystems); 1194 INIT_LIST_HEAD(&port->referrals); 1195 port->inline_data_size = -1; /* < 0 == let the transport choose */ 1196 1197 port->disc_addr.portid = cpu_to_le16(portid); 1198 config_group_init_type_name(&port->group, name, &nvmet_port_type); 1199 1200 config_group_init_type_name(&port->subsys_group, 1201 "subsystems", &nvmet_port_subsys_type); 1202 configfs_add_default_group(&port->subsys_group, &port->group); 1203 1204 config_group_init_type_name(&port->referrals_group, 1205 "referrals", &nvmet_referrals_type); 1206 configfs_add_default_group(&port->referrals_group, &port->group); 1207 1208 config_group_init_type_name(&port->ana_groups_group, 1209 "ana_groups", &nvmet_ana_groups_type); 1210 configfs_add_default_group(&port->ana_groups_group, &port->group); 1211 1212 port->ana_default_group.port = port; 1213 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; 1214 config_group_init_type_name(&port->ana_default_group.group, 1215 __stringify(NVMET_DEFAULT_ANA_GRPID), 1216 &nvmet_ana_group_type); 1217 configfs_add_default_group(&port->ana_default_group.group, 1218 &port->ana_groups_group); 1219 1220 return &port->group; 1221 } 1222 1223 static struct configfs_group_operations nvmet_ports_group_ops = { 1224 .make_group = nvmet_ports_make, 1225 }; 1226 1227 static const struct config_item_type nvmet_ports_type = { 1228 .ct_group_ops = &nvmet_ports_group_ops, 1229 .ct_owner = THIS_MODULE, 1230 }; 1231 1232 static struct config_group nvmet_subsystems_group; 1233 static struct config_group nvmet_ports_group; 1234 1235 static void nvmet_host_release(struct config_item *item) 1236 { 1237 struct nvmet_host *host = to_host(item); 1238 1239 kfree(host); 1240 } 1241 1242 static struct configfs_item_operations nvmet_host_item_ops = { 1243 .release = nvmet_host_release, 1244 }; 1245 1246 static const struct config_item_type nvmet_host_type = { 1247 .ct_item_ops = &nvmet_host_item_ops, 1248 .ct_owner = THIS_MODULE, 1249 }; 1250 1251 static struct config_group *nvmet_hosts_make_group(struct config_group *group, 1252 const char *name) 1253 { 1254 struct nvmet_host *host; 1255 1256 host = kzalloc(sizeof(*host), GFP_KERNEL); 1257 if (!host) 1258 return ERR_PTR(-ENOMEM); 1259 1260 config_group_init_type_name(&host->group, name, &nvmet_host_type); 1261 1262 return &host->group; 1263 } 1264 1265 static struct configfs_group_operations nvmet_hosts_group_ops = { 1266 .make_group = nvmet_hosts_make_group, 1267 }; 1268 1269 static const struct config_item_type nvmet_hosts_type = { 1270 .ct_group_ops = &nvmet_hosts_group_ops, 1271 .ct_owner = THIS_MODULE, 1272 }; 1273 1274 static struct config_group nvmet_hosts_group; 1275 1276 static const struct config_item_type nvmet_root_type = { 1277 .ct_owner = THIS_MODULE, 1278 }; 1279 1280 static struct configfs_subsystem nvmet_configfs_subsystem = { 1281 .su_group = { 1282 .cg_item = { 1283 .ci_namebuf = "nvmet", 1284 .ci_type = &nvmet_root_type, 1285 }, 1286 }, 1287 }; 1288 1289 int __init nvmet_init_configfs(void) 1290 { 1291 int ret; 1292 1293 config_group_init(&nvmet_configfs_subsystem.su_group); 1294 mutex_init(&nvmet_configfs_subsystem.su_mutex); 1295 1296 config_group_init_type_name(&nvmet_subsystems_group, 1297 "subsystems", &nvmet_subsystems_type); 1298 configfs_add_default_group(&nvmet_subsystems_group, 1299 &nvmet_configfs_subsystem.su_group); 1300 1301 config_group_init_type_name(&nvmet_ports_group, 1302 "ports", &nvmet_ports_type); 1303 configfs_add_default_group(&nvmet_ports_group, 1304 &nvmet_configfs_subsystem.su_group); 1305 1306 config_group_init_type_name(&nvmet_hosts_group, 1307 "hosts", &nvmet_hosts_type); 1308 configfs_add_default_group(&nvmet_hosts_group, 1309 &nvmet_configfs_subsystem.su_group); 1310 1311 ret = configfs_register_subsystem(&nvmet_configfs_subsystem); 1312 if (ret) { 1313 pr_err("configfs_register_subsystem: %d\n", ret); 1314 return ret; 1315 } 1316 1317 return 0; 1318 } 1319 1320 void __exit nvmet_exit_configfs(void) 1321 { 1322 configfs_unregister_subsystem(&nvmet_configfs_subsystem); 1323 } 1324