/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

#include "nvmet.h"

static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

static const struct nvmet_transport_name {
        u8 type;
        const char *name;
} nvmet_transport_names[] = {
        { NVMF_TRTYPE_RDMA, "rdma" },
        { NVMF_TRTYPE_FC, "fc" },
        { NVMF_TRTYPE_TCP, "tcp" },
        { NVMF_TRTYPE_LOOP, "loop" },
};

/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
                char *page)
{
        switch (to_nvmet_port(item)->disc_addr.adrfam) {
        case NVMF_ADDR_FAMILY_IP4:
                return sprintf(page, "ipv4\n");
        case NVMF_ADDR_FAMILY_IP6:
                return sprintf(page, "ipv6\n");
        case NVMF_ADDR_FAMILY_IB:
                return sprintf(page, "ib\n");
        case NVMF_ADDR_FAMILY_FC:
                return sprintf(page, "fc\n");
        default:
                return sprintf(page, "\n");
        }
}

static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);

        if (port->enabled) {
                pr_err("Cannot modify address while enabled\n");
                pr_err("Disable the address before modifying\n");
                return -EACCES;
        }

        if (sysfs_streq(page, "ipv4")) {
                port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
        } else if (sysfs_streq(page, "ipv6")) {
                port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
        } else if (sysfs_streq(page, "ib")) {
                port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
        } else if (sysfs_streq(page, "fc")) {
                port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
        } else {
                pr_err("Invalid value '%s' for adrfam\n", page);
                return -EINVAL;
        }

        return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);

static ssize_t nvmet_addr_portid_show(struct config_item *item,
                char *page)
{
        struct nvmet_port *port = to_nvmet_port(item);

        return snprintf(page, PAGE_SIZE, "%d\n",
                        le16_to_cpu(port->disc_addr.portid));
}

static ssize_t nvmet_addr_portid_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);
        u16 portid = 0;

        if (kstrtou16(page, 0, &portid)) {
                pr_err("Invalid value '%s' for portid\n", page);
                return -EINVAL;
        }

        if (port->enabled) {
                pr_err("Cannot modify address while enabled\n");
                pr_err("Disable the address before modifying\n");
                return -EACCES;
        }
        port->disc_addr.portid = cpu_to_le16(portid);
        return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);

static ssize_t nvmet_addr_traddr_show(struct config_item *item,
                char *page)
{
        struct nvmet_port *port = to_nvmet_port(item);

        return snprintf(page, PAGE_SIZE, "%s\n",
                        port->disc_addr.traddr);
}

static ssize_t nvmet_addr_traddr_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);

        if (count > NVMF_TRADDR_SIZE) {
                pr_err("Invalid value '%s' for traddr\n", page);
                return -EINVAL;
        }

        if (port->enabled) {
                pr_err("Cannot modify address while enabled\n");
                pr_err("Disable the address before modifying\n");
                return -EACCES;
        }

        if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
                return -EINVAL;
        return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);

static ssize_t nvmet_addr_treq_show(struct config_item *item,
                char *page)
{
        switch (to_nvmet_port(item)->disc_addr.treq &
                NVME_TREQ_SECURE_CHANNEL_MASK) {
        case NVMF_TREQ_NOT_SPECIFIED:
                return sprintf(page, "not specified\n");
        case NVMF_TREQ_REQUIRED:
                return sprintf(page, "required\n");
        case NVMF_TREQ_NOT_REQUIRED:
                return sprintf(page, "not required\n");
        default:
                return sprintf(page, "\n");
        }
}

static ssize_t nvmet_addr_treq_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);
        u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;

        if (port->enabled) {
                pr_err("Cannot modify address while enabled\n");
                pr_err("Disable the address before modifying\n");
                return -EACCES;
        }

        if (sysfs_streq(page, "not specified")) {
                treq |= NVMF_TREQ_NOT_SPECIFIED;
        } else if (sysfs_streq(page, "required")) {
                treq |= NVMF_TREQ_REQUIRED;
        } else if (sysfs_streq(page, "not required")) {
                treq |= NVMF_TREQ_NOT_REQUIRED;
        } else {
                pr_err("Invalid value '%s' for treq\n", page);
                return -EINVAL;
        }
        port->disc_addr.treq = treq;

        return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);

static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
                char *page)
{
        struct nvmet_port *port = to_nvmet_port(item);

        return snprintf(page, PAGE_SIZE, "%s\n",
                        port->disc_addr.trsvcid);
}

static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);

        if (count > NVMF_TRSVCID_SIZE) {
                pr_err("Invalid value '%s' for trsvcid\n", page);
                return -EINVAL;
        }
        if (port->enabled) {
                pr_err("Cannot modify address while enabled\n");
                pr_err("Disable the address before modifying\n");
                return -EACCES;
        }

        if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
                return -EINVAL;
        return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);

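/*
 * param_inline_data_size controls how much in-capsule (inline) data the
 * transport accepts on this port.  A value below zero means the transport
 * driver picks its own default when the port is enabled (see
 * nvmet_ports_make()).
 */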
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
                char *page)
{
        struct nvmet_port *port = to_nvmet_port(item);

        return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);
        int ret;

        if (port->enabled) {
                pr_err("Cannot modify inline_data_size while port enabled\n");
                pr_err("Disable the port before modifying\n");
                return -EACCES;
        }
        ret = kstrtoint(page, 0, &port->inline_data_size);
        if (ret) {
                pr_err("Invalid value '%s' for inline_data_size\n", page);
                return -EINVAL;
        }
        return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);

static ssize_t nvmet_addr_trtype_show(struct config_item *item,
                char *page)
{
        struct nvmet_port *port = to_nvmet_port(item);
        int i;

        for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
                if (port->disc_addr.trtype != nvmet_transport_names[i].type)
                        continue;
                return sprintf(page, "%s\n", nvmet_transport_names[i].name);
        }

        return sprintf(page, "\n");
}

static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
        port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
        port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
        port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}

static ssize_t nvmet_addr_trtype_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *port = to_nvmet_port(item);
        int i;

        if (port->enabled) {
                pr_err("Cannot modify address while enabled\n");
                pr_err("Disable the address before modifying\n");
                return -EACCES;
        }

        for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
                if (sysfs_streq(page, nvmet_transport_names[i].name))
                        goto found;
        }

        pr_err("Invalid value '%s' for trtype\n", page);
        return -EINVAL;
found:
        memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
        port->disc_addr.trtype = nvmet_transport_names[i].type;
        if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
                nvmet_port_init_tsas_rdma(port);
        return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);

/*
 * Namespace structures & file operation functions below
 */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
        return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}

static ssize_t nvmet_ns_device_path_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct nvmet_subsys *subsys = ns->subsys;
        size_t len;
        int ret;

        mutex_lock(&subsys->lock);
        ret = -EBUSY;
        if (ns->enabled)
                goto out_unlock;

        ret = -EINVAL;
        len = strcspn(page, "\n");
        if (!len)
                goto out_unlock;

        kfree(ns->device_path);
        ret = -ENOMEM;
        ns->device_path = kstrndup(page, len, GFP_KERNEL);
        if (!ns->device_path)
                goto out_unlock;

        mutex_unlock(&subsys->lock);
        return count;

out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);

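/*
 * Optional peer-to-peer DMA support: the p2pmem attribute is parsed by
 * pci_p2pdma_enable_store() and selects whether (and from which PCI device)
 * peer-to-peer memory is used for this namespace.  Like the other namespace
 * attributes, it can only be changed while the namespace is disabled.
 */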
#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);

        return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct pci_dev *p2p_dev = NULL;
        bool use_p2pmem;
        int ret = count;
        int error;

        mutex_lock(&ns->subsys->lock);
        if (ns->enabled) {
                ret = -EBUSY;
                goto out_unlock;
        }

        error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
        if (error) {
                ret = error;
                goto out_unlock;
        }

        ns->use_p2pmem = use_p2pmem;
        pci_dev_put(ns->p2p_dev);
        ns->p2p_dev = p2p_dev;

out_unlock:
        mutex_unlock(&ns->subsys->lock);

        return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */

static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
        return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct nvmet_subsys *subsys = ns->subsys;
        int ret = 0;

        mutex_lock(&subsys->lock);
        if (ns->enabled) {
                ret = -EBUSY;
                goto out_unlock;
        }

        if (uuid_parse(page, &ns->uuid))
                ret = -EINVAL;

out_unlock:
        mutex_unlock(&subsys->lock);
        return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);

static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
        return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct nvmet_subsys *subsys = ns->subsys;
        u8 nguid[16];
        const char *p = page;
        int i;
        int ret = 0;

        mutex_lock(&subsys->lock);
        if (ns->enabled) {
                ret = -EBUSY;
                goto out_unlock;
        }

        for (i = 0; i < 16; i++) {
                if (p + 2 > page + count) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (!isxdigit(p[0]) || !isxdigit(p[1])) {
                        ret = -EINVAL;
                        goto out_unlock;
                }

                nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
                p += 2;

                if (*p == '-' || *p == ':')
                        p++;
        }

        memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
        mutex_unlock(&subsys->lock);
        return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);

static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
        return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        u32 oldgrpid, newgrpid;
        int ret;

        ret = kstrtou32(page, 0, &newgrpid);
        if (ret)
                return ret;

        if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
                return -EINVAL;

        down_write(&nvmet_ana_sem);
        oldgrpid = ns->anagrpid;
        nvmet_ana_group_enabled[newgrpid]++;
        ns->anagrpid = newgrpid;
        nvmet_ana_group_enabled[oldgrpid]--;
        nvmet_ana_chgcnt++;
        up_write(&nvmet_ana_sem);

        nvmet_send_ana_event(ns->subsys, NULL);
        return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);

static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
        return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

static ssize_t nvmet_ns_enable_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        bool enable;
        int ret = 0;

        if (strtobool(page, &enable))
                return -EINVAL;

        if (enable)
                ret = nvmet_ns_enable(ns);
        else
                nvmet_ns_disable(ns);

        return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);

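/*
 * buffered_io switches file-backed namespaces from direct to buffered I/O.
 * The namespace must be disabled while the value is changed.
 */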
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
        return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);
        bool val;

        if (strtobool(page, &val))
                return -EINVAL;

        mutex_lock(&ns->subsys->lock);
        if (ns->enabled) {
                pr_err("disable ns before setting buffered_io value.\n");
                mutex_unlock(&ns->subsys->lock);
                return -EINVAL;
        }

        ns->buffered_io = val;
        mutex_unlock(&ns->subsys->lock);
        return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);

static struct configfs_attribute *nvmet_ns_attrs[] = {
        &nvmet_ns_attr_device_path,
        &nvmet_ns_attr_device_nguid,
        &nvmet_ns_attr_device_uuid,
        &nvmet_ns_attr_ana_grpid,
        &nvmet_ns_attr_enable,
        &nvmet_ns_attr_buffered_io,
#ifdef CONFIG_PCI_P2PDMA
        &nvmet_ns_attr_p2pmem,
#endif
        NULL,
};

static void nvmet_ns_release(struct config_item *item)
{
        struct nvmet_ns *ns = to_nvmet_ns(item);

        nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
        .release = nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
        .ct_item_ops = &nvmet_ns_item_ops,
        .ct_attrs = nvmet_ns_attrs,
        .ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_ns_make(struct config_group *group,
                const char *name)
{
        struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
        struct nvmet_ns *ns;
        int ret;
        u32 nsid;

        ret = kstrtou32(name, 0, &nsid);
        if (ret)
                goto out;

        ret = -EINVAL;
        if (nsid == 0 || nsid == NVME_NSID_ALL)
                goto out;

        ret = -ENOMEM;
        ns = nvmet_ns_alloc(subsys, nsid);
        if (!ns)
                goto out;
        config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

        pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

        return &ns->group;
out:
        return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
        .make_group = nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
        .ct_group_ops = &nvmet_namespaces_group_ops,
        .ct_owner = THIS_MODULE,
};

static int nvmet_port_subsys_allow_link(struct config_item *parent,
                struct config_item *target)
{
        struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
        struct nvmet_subsys *subsys;
        struct nvmet_subsys_link *link, *p;
        int ret;

        if (target->ci_type != &nvmet_subsys_type) {
                pr_err("can only link subsystems into the subsystems dir.!\n");
                return -EINVAL;
        }
        subsys = to_subsys(target);
        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;
        link->subsys = subsys;

        down_write(&nvmet_config_sem);
        ret = -EEXIST;
        list_for_each_entry(p, &port->subsystems, entry) {
                if (p->subsys == subsys)
                        goto out_free_link;
        }

        if (list_empty(&port->subsystems)) {
                ret = nvmet_enable_port(port);
                if (ret)
                        goto out_free_link;
        }

        list_add_tail(&link->entry, &port->subsystems);
        nvmet_port_disc_changed(port, subsys);

        up_write(&nvmet_config_sem);
        return 0;

out_free_link:
        up_write(&nvmet_config_sem);
        kfree(link);
        return ret;
}

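/*
 * Removing a subsystem symlink undoes the work above: the subsystem is
 * dropped from the port, and the port itself is disabled again once the
 * last subsystem link is gone.
 */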
static void nvmet_port_subsys_drop_link(struct config_item *parent,
                struct config_item *target)
{
        struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
        struct nvmet_subsys *subsys = to_subsys(target);
        struct nvmet_subsys_link *p;

        down_write(&nvmet_config_sem);
        list_for_each_entry(p, &port->subsystems, entry) {
                if (p->subsys == subsys)
                        goto found;
        }
        up_write(&nvmet_config_sem);
        return;

found:
        list_del(&p->entry);
        nvmet_port_disc_changed(port, subsys);

        if (list_empty(&port->subsystems))
                nvmet_disable_port(port);
        up_write(&nvmet_config_sem);
        kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
        .allow_link = nvmet_port_subsys_allow_link,
        .drop_link = nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
        .ct_item_ops = &nvmet_port_subsys_item_ops,
        .ct_owner = THIS_MODULE,
};

static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
                struct config_item *target)
{
        struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
        struct nvmet_host *host;
        struct nvmet_host_link *link, *p;
        int ret;

        if (target->ci_type != &nvmet_host_type) {
                pr_err("can only link hosts into the allowed_hosts directory!\n");
                return -EINVAL;
        }

        host = to_host(target);
        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;
        link->host = host;

        down_write(&nvmet_config_sem);
        ret = -EINVAL;
        if (subsys->allow_any_host) {
                pr_err("can't add hosts when allow_any_host is set!\n");
                goto out_free_link;
        }

        ret = -EEXIST;
        list_for_each_entry(p, &subsys->hosts, entry) {
                if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
                        goto out_free_link;
        }
        list_add_tail(&link->entry, &subsys->hosts);
        nvmet_subsys_disc_changed(subsys, host);

        up_write(&nvmet_config_sem);
        return 0;
out_free_link:
        up_write(&nvmet_config_sem);
        kfree(link);
        return ret;
}

static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
                struct config_item *target)
{
        struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
        struct nvmet_host *host = to_host(target);
        struct nvmet_host_link *p;

        down_write(&nvmet_config_sem);
        list_for_each_entry(p, &subsys->hosts, entry) {
                if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
                        goto found;
        }
        up_write(&nvmet_config_sem);
        return;

found:
        list_del(&p->entry);
        nvmet_subsys_disc_changed(subsys, host);

        up_write(&nvmet_config_sem);
        kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
        .allow_link = nvmet_allowed_hosts_allow_link,
        .drop_link = nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
        .ct_item_ops = &nvmet_allowed_hosts_item_ops,
        .ct_owner = THIS_MODULE,
};

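/*
 * allow_any_host and the allowed_hosts list are mutually exclusive: hosts
 * cannot be linked while allow_any_host is set, and allow_any_host cannot
 * be set while explicit hosts are configured.
 */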
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
                char *page)
{
        return snprintf(page, PAGE_SIZE, "%d\n",
                to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_subsys *subsys = to_subsys(item);
        bool allow_any_host;
        int ret = 0;

        if (strtobool(page, &allow_any_host))
                return -EINVAL;

        down_write(&nvmet_config_sem);
        if (allow_any_host && !list_empty(&subsys->hosts)) {
                pr_err("Can't set allow_any_host when explicit hosts are set!\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        if (subsys->allow_any_host != allow_any_host) {
                subsys->allow_any_host = allow_any_host;
                nvmet_subsys_disc_changed(subsys, NULL);
        }

out_unlock:
        up_write(&nvmet_config_sem);
        return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);

static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
                char *page)
{
        struct nvmet_subsys *subsys = to_subsys(item);

        if (NVME_TERTIARY(subsys->ver))
                return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
                                (int)NVME_MAJOR(subsys->ver),
                                (int)NVME_MINOR(subsys->ver),
                                (int)NVME_TERTIARY(subsys->ver));
        else
                return snprintf(page, PAGE_SIZE, "%d.%d\n",
                                (int)NVME_MAJOR(subsys->ver),
                                (int)NVME_MINOR(subsys->ver));
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_subsys *subsys = to_subsys(item);
        int major, minor, tertiary = 0;
        int ret;

        ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
        if (ret != 2 && ret != 3)
                return -EINVAL;

        down_write(&nvmet_config_sem);
        subsys->ver = NVME_VS(major, minor, tertiary);
        up_write(&nvmet_config_sem);

        return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);

static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
                char *page)
{
        struct nvmet_subsys *subsys = to_subsys(item);

        return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_subsys *subsys = to_subsys(item);

        down_write(&nvmet_config_sem);
        sscanf(page, "%llx\n", &subsys->serial);
        up_write(&nvmet_config_sem);

        return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);

static struct configfs_attribute *nvmet_subsys_attrs[] = {
        &nvmet_subsys_attr_attr_allow_any_host,
        &nvmet_subsys_attr_attr_version,
        &nvmet_subsys_attr_attr_serial,
        NULL,
};

/*
 * Subsystem structures & folder operation functions below
 */
static void nvmet_subsys_release(struct config_item *item)
{
        struct nvmet_subsys *subsys = to_subsys(item);

        nvmet_subsys_del_ctrls(subsys);
        nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
        .release = nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
        .ct_item_ops = &nvmet_subsys_item_ops,
        .ct_attrs = nvmet_subsys_attrs,
        .ct_owner = THIS_MODULE,
};

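/*
 * Creating a subsystem directory allocates the subsystem together with its
 * default "namespaces" and "allowed_hosts" groups.  The well-known discovery
 * NQN is reserved and cannot be created from configfs.
 */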
static struct config_group *nvmet_subsys_make(struct config_group *group,
                const char *name)
{
        struct nvmet_subsys *subsys;

        if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
                pr_err("can't create discovery subsystem through configfs\n");
                return ERR_PTR(-EINVAL);
        }

        subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
        if (!subsys)
                return ERR_PTR(-ENOMEM);

        config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

        config_group_init_type_name(&subsys->namespaces_group,
                        "namespaces", &nvmet_namespaces_type);
        configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

        config_group_init_type_name(&subsys->allowed_hosts_group,
                        "allowed_hosts", &nvmet_allowed_hosts_type);
        configfs_add_default_group(&subsys->allowed_hosts_group,
                        &subsys->group);

        return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
        .make_group = nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
        .ct_group_ops = &nvmet_subsystems_group_ops,
        .ct_owner = THIS_MODULE,
};

static ssize_t nvmet_referral_enable_show(struct config_item *item,
                char *page)
{
        return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
        struct nvmet_port *port = to_nvmet_port(item);
        bool enable;

        if (strtobool(page, &enable))
                goto inval;

        if (enable)
                nvmet_referral_enable(parent, port);
        else
                nvmet_referral_disable(parent, port);

        return count;
inval:
        pr_err("Invalid value '%s' for enable\n", page);
        return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);

/*
 * Discovery Service subsystem definitions
 */
static struct configfs_attribute *nvmet_referral_attrs[] = {
        &nvmet_attr_addr_adrfam,
        &nvmet_attr_addr_portid,
        &nvmet_attr_addr_treq,
        &nvmet_attr_addr_traddr,
        &nvmet_attr_addr_trsvcid,
        &nvmet_attr_addr_trtype,
        &nvmet_referral_attr_enable,
        NULL,
};

static void nvmet_referral_release(struct config_item *item)
{
        struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
        struct nvmet_port *port = to_nvmet_port(item);

        nvmet_referral_disable(parent, port);
        kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
        .release = nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
        .ct_owner = THIS_MODULE,
        .ct_attrs = nvmet_referral_attrs,
        .ct_item_ops = &nvmet_referral_item_ops,
};

static struct config_group *nvmet_referral_make(
                struct config_group *group, const char *name)
{
        struct nvmet_port *port;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&port->entry);
        config_group_init_type_name(&port->group, name, &nvmet_referral_type);

        return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
        .make_group = nvmet_referral_make,
};

static const struct config_item_type nvmet_referrals_type = {
        .ct_owner = THIS_MODULE,
        .ct_group_ops = &nvmet_referral_group_ops,
};

static struct {
        enum nvme_ana_state state;
        const char *name;
} nvmet_ana_state_names[] = {
        { NVME_ANA_OPTIMIZED, "optimized" },
        { NVME_ANA_NONOPTIMIZED, "non-optimized" },
        { NVME_ANA_INACCESSIBLE, "inaccessible" },
        { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
        { NVME_ANA_CHANGE, "change" },
};

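/*
 * Each group under ana_groups/ exposes an ana_state attribute mapped through
 * the table above.  State changes bump nvmet_ana_chgcnt and trigger an ANA
 * change AEN on the port.
 */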
static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
                char *page)
{
        struct nvmet_ana_group *grp = to_ana_group(item);
        enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
        int i;

        for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
                if (state != nvmet_ana_state_names[i].state)
                        continue;
                return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
        }

        return sprintf(page, "\n");
}

static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
                const char *page, size_t count)
{
        struct nvmet_ana_group *grp = to_ana_group(item);
        int i;

        for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
                if (sysfs_streq(page, nvmet_ana_state_names[i].name))
                        goto found;
        }

        pr_err("Invalid value '%s' for ana_state\n", page);
        return -EINVAL;

found:
        down_write(&nvmet_ana_sem);
        grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
        nvmet_ana_chgcnt++;
        up_write(&nvmet_ana_sem);

        nvmet_port_send_ana_event(grp->port);
        return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);

static struct configfs_attribute *nvmet_ana_group_attrs[] = {
        &nvmet_ana_group_attr_ana_state,
        NULL,
};

static void nvmet_ana_group_release(struct config_item *item)
{
        struct nvmet_ana_group *grp = to_ana_group(item);

        if (grp == &grp->port->ana_default_group)
                return;

        down_write(&nvmet_ana_sem);
        grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
        nvmet_ana_group_enabled[grp->grpid]--;
        up_write(&nvmet_ana_sem);

        nvmet_port_send_ana_event(grp->port);
        kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
        .release = nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
        .ct_item_ops = &nvmet_ana_group_item_ops,
        .ct_attrs = nvmet_ana_group_attrs,
        .ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_ana_groups_make_group(
                struct config_group *group, const char *name)
{
        struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
        struct nvmet_ana_group *grp;
        u32 grpid;
        int ret;

        ret = kstrtou32(name, 0, &grpid);
        if (ret)
                goto out;

        ret = -EINVAL;
        if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
                goto out;

        ret = -ENOMEM;
        grp = kzalloc(sizeof(*grp), GFP_KERNEL);
        if (!grp)
                goto out;
        grp->port = port;
        grp->grpid = grpid;

        down_write(&nvmet_ana_sem);
        nvmet_ana_group_enabled[grpid]++;
        up_write(&nvmet_ana_sem);

        nvmet_port_send_ana_event(grp->port);

        config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
        return &grp->group;
out:
        return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
        .make_group = nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
        .ct_group_ops = &nvmet_ana_groups_group_ops,
        .ct_owner = THIS_MODULE,
};

/*
 * Ports definitions.
 */
static void nvmet_port_release(struct config_item *item)
{
        struct nvmet_port *port = to_nvmet_port(item);

        list_del(&port->global_entry);

        kfree(port->ana_state);
        kfree(port);
}

static struct configfs_attribute *nvmet_port_attrs[] = {
        &nvmet_attr_addr_adrfam,
        &nvmet_attr_addr_treq,
        &nvmet_attr_addr_traddr,
        &nvmet_attr_addr_trsvcid,
        &nvmet_attr_addr_trtype,
        &nvmet_attr_param_inline_data_size,
        NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
        .release = nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
        .ct_attrs = nvmet_port_attrs,
        .ct_item_ops = &nvmet_port_item_ops,
        .ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_ports_make(struct config_group *group,
                const char *name)
{
        struct nvmet_port *port;
        u16 portid;
        u32 i;

        if (kstrtou16(name, 0, &portid))
                return ERR_PTR(-EINVAL);

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return ERR_PTR(-ENOMEM);

        port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
                        sizeof(*port->ana_state), GFP_KERNEL);
        if (!port->ana_state) {
                kfree(port);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
                if (i == NVMET_DEFAULT_ANA_GRPID)
                        port->ana_state[1] = NVME_ANA_OPTIMIZED;
                else
                        port->ana_state[i] = NVME_ANA_INACCESSIBLE;
        }

        list_add(&port->global_entry, &nvmet_ports_list);

        INIT_LIST_HEAD(&port->entry);
        INIT_LIST_HEAD(&port->subsystems);
        INIT_LIST_HEAD(&port->referrals);
        port->inline_data_size = -1;    /* < 0 == let the transport choose */

        port->disc_addr.portid = cpu_to_le16(portid);
        port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
        config_group_init_type_name(&port->group, name, &nvmet_port_type);

        config_group_init_type_name(&port->subsys_group,
                        "subsystems", &nvmet_port_subsys_type);
        configfs_add_default_group(&port->subsys_group, &port->group);

        config_group_init_type_name(&port->referrals_group,
                        "referrals", &nvmet_referrals_type);
        configfs_add_default_group(&port->referrals_group, &port->group);

        config_group_init_type_name(&port->ana_groups_group,
                        "ana_groups", &nvmet_ana_groups_type);
        configfs_add_default_group(&port->ana_groups_group, &port->group);

        port->ana_default_group.port = port;
        port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
        config_group_init_type_name(&port->ana_default_group.group,
                        __stringify(NVMET_DEFAULT_ANA_GRPID),
                        &nvmet_ana_group_type);
        configfs_add_default_group(&port->ana_default_group.group,
                        &port->ana_groups_group);

        return &port->group;
}

static struct configfs_group_operations nvmet_ports_group_ops = {
        .make_group = nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
        .ct_group_ops = &nvmet_ports_group_ops,
        .ct_owner = THIS_MODULE,
};

static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;

static void nvmet_host_release(struct config_item *item)
{
        struct nvmet_host *host = to_host(item);

        kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
        .release = nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
        .ct_item_ops = &nvmet_host_item_ops,
        .ct_owner = THIS_MODULE,
};

static struct config_group *nvmet_hosts_make_group(struct config_group *group,
                const char *name)
{
        struct nvmet_host *host;

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
                return ERR_PTR(-ENOMEM);

        config_group_init_type_name(&host->group, name, &nvmet_host_type);

        return &host->group;
}

static struct configfs_group_operations nvmet_hosts_group_ops = {
        .make_group = nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
        .ct_group_ops = &nvmet_hosts_group_ops,
        .ct_owner = THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

static const struct config_item_type nvmet_root_type = {
        .ct_owner = THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
        .su_group = {
                .cg_item = {
                        .ci_namebuf = "nvmet",
                        .ci_type = &nvmet_root_type,
                },
        },
};

int __init nvmet_init_configfs(void)
{
        int ret;

        config_group_init(&nvmet_configfs_subsystem.su_group);
        mutex_init(&nvmet_configfs_subsystem.su_mutex);

        config_group_init_type_name(&nvmet_subsystems_group,
                        "subsystems", &nvmet_subsystems_type);
        configfs_add_default_group(&nvmet_subsystems_group,
                        &nvmet_configfs_subsystem.su_group);

        config_group_init_type_name(&nvmet_ports_group,
                        "ports", &nvmet_ports_type);
        configfs_add_default_group(&nvmet_ports_group,
                        &nvmet_configfs_subsystem.su_group);

        config_group_init_type_name(&nvmet_hosts_group,
                        "hosts", &nvmet_hosts_type);
        configfs_add_default_group(&nvmet_hosts_group,
                        &nvmet_configfs_subsystem.su_group);

        ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
        if (ret) {
                pr_err("configfs_register_subsystem: %d\n", ret);
                return ret;
        }

        return 0;
}

void __exit nvmet_exit_configfs(void)
{
        configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}