1 /* 2 * Configfs interface for the NVMe target. 3 * Copyright (c) 2015-2016 HGST, a Western Digital Company. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/stat.h> 19 #include <linux/ctype.h> 20 21 #include "nvmet.h" 22 23 static const struct config_item_type nvmet_host_type; 24 static const struct config_item_type nvmet_subsys_type; 25 26 static const struct nvmet_transport_name { 27 u8 type; 28 const char *name; 29 } nvmet_transport_names[] = { 30 { NVMF_TRTYPE_RDMA, "rdma" }, 31 { NVMF_TRTYPE_FC, "fc" }, 32 { NVMF_TRTYPE_LOOP, "loop" }, 33 }; 34 35 /* 36 * nvmet_port Generic ConfigFS definitions. 37 * Used in any place in the ConfigFS tree that refers to an address. 38 */ 39 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, 40 char *page) 41 { 42 switch (to_nvmet_port(item)->disc_addr.adrfam) { 43 case NVMF_ADDR_FAMILY_IP4: 44 return sprintf(page, "ipv4\n"); 45 case NVMF_ADDR_FAMILY_IP6: 46 return sprintf(page, "ipv6\n"); 47 case NVMF_ADDR_FAMILY_IB: 48 return sprintf(page, "ib\n"); 49 case NVMF_ADDR_FAMILY_FC: 50 return sprintf(page, "fc\n"); 51 default: 52 return sprintf(page, "\n"); 53 } 54 } 55 56 static ssize_t nvmet_addr_adrfam_store(struct config_item *item, 57 const char *page, size_t count) 58 { 59 struct nvmet_port *port = to_nvmet_port(item); 60 61 if (port->enabled) { 62 pr_err("Cannot modify address while enabled\n"); 63 pr_err("Disable the address before modifying\n"); 64 return -EACCES; 65 } 66 67 if (sysfs_streq(page, "ipv4")) { 68 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4; 69 } else if (sysfs_streq(page, "ipv6")) { 70 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6; 71 } else if (sysfs_streq(page, "ib")) { 72 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB; 73 } else if (sysfs_streq(page, "fc")) { 74 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC; 75 } else { 76 pr_err("Invalid value '%s' for adrfam\n", page); 77 return -EINVAL; 78 } 79 80 return count; 81 } 82 83 CONFIGFS_ATTR(nvmet_, addr_adrfam); 84 85 static ssize_t nvmet_addr_portid_show(struct config_item *item, 86 char *page) 87 { 88 struct nvmet_port *port = to_nvmet_port(item); 89 90 return snprintf(page, PAGE_SIZE, "%d\n", 91 le16_to_cpu(port->disc_addr.portid)); 92 } 93 94 static ssize_t nvmet_addr_portid_store(struct config_item *item, 95 const char *page, size_t count) 96 { 97 struct nvmet_port *port = to_nvmet_port(item); 98 u16 portid = 0; 99 100 if (kstrtou16(page, 0, &portid)) { 101 pr_err("Invalid value '%s' for portid\n", page); 102 return -EINVAL; 103 } 104 105 if (port->enabled) { 106 pr_err("Cannot modify address while enabled\n"); 107 pr_err("Disable the address before modifying\n"); 108 return -EACCES; 109 } 110 port->disc_addr.portid = cpu_to_le16(portid); 111 return count; 112 } 113 114 CONFIGFS_ATTR(nvmet_, addr_portid); 115 116 static ssize_t nvmet_addr_traddr_show(struct config_item *item, 117 char *page) 118 { 119 struct nvmet_port *port = to_nvmet_port(item); 120 121 return snprintf(page, PAGE_SIZE, "%s\n", 122 port->disc_addr.traddr); 123 } 124 125 static ssize_t nvmet_addr_traddr_store(struct config_item *item, 126 const char *page, size_t count) 127 { 128 struct nvmet_port *port = to_nvmet_port(item); 129 130 if (count > NVMF_TRADDR_SIZE) { 131 pr_err("Invalid value '%s' for traddr\n", page); 132 return -EINVAL; 133 } 134 135 if (port->enabled) { 136 pr_err("Cannot modify address while enabled\n"); 137 pr_err("Disable the address before modifying\n"); 138 return -EACCES; 139 } 140 141 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1) 142 return -EINVAL; 143 return count; 144 } 145 146 CONFIGFS_ATTR(nvmet_, addr_traddr); 147 148 static ssize_t nvmet_addr_treq_show(struct config_item *item, 149 char *page) 150 { 151 switch (to_nvmet_port(item)->disc_addr.treq) { 152 case NVMF_TREQ_NOT_SPECIFIED: 153 return sprintf(page, "not specified\n"); 154 case NVMF_TREQ_REQUIRED: 155 return sprintf(page, "required\n"); 156 case NVMF_TREQ_NOT_REQUIRED: 157 return sprintf(page, "not required\n"); 158 default: 159 return sprintf(page, "\n"); 160 } 161 } 162 163 static ssize_t nvmet_addr_treq_store(struct config_item *item, 164 const char *page, size_t count) 165 { 166 struct nvmet_port *port = to_nvmet_port(item); 167 168 if (port->enabled) { 169 pr_err("Cannot modify address while enabled\n"); 170 pr_err("Disable the address before modifying\n"); 171 return -EACCES; 172 } 173 174 if (sysfs_streq(page, "not specified")) { 175 port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED; 176 } else if (sysfs_streq(page, "required")) { 177 port->disc_addr.treq = NVMF_TREQ_REQUIRED; 178 } else if (sysfs_streq(page, "not required")) { 179 port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED; 180 } else { 181 pr_err("Invalid value '%s' for treq\n", page); 182 return -EINVAL; 183 } 184 185 return count; 186 } 187 188 CONFIGFS_ATTR(nvmet_, addr_treq); 189 190 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item, 191 char *page) 192 { 193 struct nvmet_port *port = to_nvmet_port(item); 194 195 return snprintf(page, PAGE_SIZE, "%s\n", 196 port->disc_addr.trsvcid); 197 } 198 199 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, 200 const char *page, size_t count) 201 { 202 struct nvmet_port *port = to_nvmet_port(item); 203 204 if (count > NVMF_TRSVCID_SIZE) { 205 pr_err("Invalid value '%s' for trsvcid\n", page); 206 return -EINVAL; 207 } 208 if (port->enabled) { 209 pr_err("Cannot modify address while enabled\n"); 210 pr_err("Disable the address before modifying\n"); 211 return -EACCES; 212 } 213 214 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1) 215 return -EINVAL; 216 return count; 217 } 218 219 CONFIGFS_ATTR(nvmet_, addr_trsvcid); 220 221 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, 222 char *page) 223 { 224 struct nvmet_port *port = to_nvmet_port(item); 225 226 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); 227 } 228 229 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, 230 const char *page, size_t count) 231 { 232 struct nvmet_port *port = to_nvmet_port(item); 233 int ret; 234 235 if (port->enabled) { 236 pr_err("Cannot modify inline_data_size while port enabled\n"); 237 pr_err("Disable the port before modifying\n"); 238 return -EACCES; 239 } 240 ret = kstrtoint(page, 0, &port->inline_data_size); 241 if (ret) { 242 pr_err("Invalid value '%s' for inline_data_size\n", page); 243 return -EINVAL; 244 } 245 return count; 246 } 247 248 CONFIGFS_ATTR(nvmet_, param_inline_data_size); 249 250 static ssize_t nvmet_addr_trtype_show(struct config_item *item, 251 char *page) 252 { 253 struct nvmet_port *port = to_nvmet_port(item); 254 int i; 255 256 for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) { 257 if (port->disc_addr.trtype != nvmet_transport_names[i].type) 258 continue; 259 return sprintf(page, "%s\n", nvmet_transport_names[i].name); 260 } 261 262 return sprintf(page, "\n"); 263 } 264 265 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) 266 { 267 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; 268 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED; 269 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; 270 } 271 272 static ssize_t nvmet_addr_trtype_store(struct config_item *item, 273 const char *page, size_t count) 274 { 275 struct nvmet_port *port = to_nvmet_port(item); 276 int i; 277 278 if (port->enabled) { 279 pr_err("Cannot modify address while enabled\n"); 280 pr_err("Disable the address before modifying\n"); 281 return -EACCES; 282 } 283 284 for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) { 285 if (sysfs_streq(page, nvmet_transport_names[i].name)) 286 goto found; 287 } 288 289 pr_err("Invalid value '%s' for trtype\n", page); 290 return -EINVAL; 291 found: 292 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); 293 port->disc_addr.trtype = nvmet_transport_names[i].type; 294 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) 295 nvmet_port_init_tsas_rdma(port); 296 return count; 297 } 298 299 CONFIGFS_ATTR(nvmet_, addr_trtype); 300 301 /* 302 * Namespace structures & file operation functions below 303 */ 304 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page) 305 { 306 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path); 307 } 308 309 static ssize_t nvmet_ns_device_path_store(struct config_item *item, 310 const char *page, size_t count) 311 { 312 struct nvmet_ns *ns = to_nvmet_ns(item); 313 struct nvmet_subsys *subsys = ns->subsys; 314 size_t len; 315 int ret; 316 317 mutex_lock(&subsys->lock); 318 ret = -EBUSY; 319 if (ns->enabled) 320 goto out_unlock; 321 322 ret = -EINVAL; 323 len = strcspn(page, "\n"); 324 if (!len) 325 goto out_unlock; 326 327 kfree(ns->device_path); 328 ret = -ENOMEM; 329 ns->device_path = kstrndup(page, len, GFP_KERNEL); 330 if (!ns->device_path) 331 goto out_unlock; 332 333 mutex_unlock(&subsys->lock); 334 return count; 335 336 out_unlock: 337 mutex_unlock(&subsys->lock); 338 return ret; 339 } 340 341 CONFIGFS_ATTR(nvmet_ns_, device_path); 342 343 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page) 344 { 345 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid); 346 } 347 348 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item, 349 const char *page, size_t count) 350 { 351 struct nvmet_ns *ns = to_nvmet_ns(item); 352 struct nvmet_subsys *subsys = ns->subsys; 353 int ret = 0; 354 355 356 mutex_lock(&subsys->lock); 357 if (ns->enabled) { 358 ret = -EBUSY; 359 goto out_unlock; 360 } 361 362 363 if (uuid_parse(page, &ns->uuid)) 364 ret = -EINVAL; 365 366 out_unlock: 367 mutex_unlock(&subsys->lock); 368 return ret ? ret : count; 369 } 370 371 CONFIGFS_ATTR(nvmet_ns_, device_uuid); 372 373 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) 374 { 375 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); 376 } 377 378 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, 379 const char *page, size_t count) 380 { 381 struct nvmet_ns *ns = to_nvmet_ns(item); 382 struct nvmet_subsys *subsys = ns->subsys; 383 u8 nguid[16]; 384 const char *p = page; 385 int i; 386 int ret = 0; 387 388 mutex_lock(&subsys->lock); 389 if (ns->enabled) { 390 ret = -EBUSY; 391 goto out_unlock; 392 } 393 394 for (i = 0; i < 16; i++) { 395 if (p + 2 > page + count) { 396 ret = -EINVAL; 397 goto out_unlock; 398 } 399 if (!isxdigit(p[0]) || !isxdigit(p[1])) { 400 ret = -EINVAL; 401 goto out_unlock; 402 } 403 404 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]); 405 p += 2; 406 407 if (*p == '-' || *p == ':') 408 p++; 409 } 410 411 memcpy(&ns->nguid, nguid, sizeof(nguid)); 412 out_unlock: 413 mutex_unlock(&subsys->lock); 414 return ret ? ret : count; 415 } 416 417 CONFIGFS_ATTR(nvmet_ns_, device_nguid); 418 419 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) 420 { 421 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); 422 } 423 424 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, 425 const char *page, size_t count) 426 { 427 struct nvmet_ns *ns = to_nvmet_ns(item); 428 u32 oldgrpid, newgrpid; 429 int ret; 430 431 ret = kstrtou32(page, 0, &newgrpid); 432 if (ret) 433 return ret; 434 435 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) 436 return -EINVAL; 437 438 down_write(&nvmet_ana_sem); 439 oldgrpid = ns->anagrpid; 440 nvmet_ana_group_enabled[newgrpid]++; 441 ns->anagrpid = newgrpid; 442 nvmet_ana_group_enabled[oldgrpid]--; 443 nvmet_ana_chgcnt++; 444 up_write(&nvmet_ana_sem); 445 446 nvmet_send_ana_event(ns->subsys, NULL); 447 return count; 448 } 449 450 CONFIGFS_ATTR(nvmet_ns_, ana_grpid); 451 452 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) 453 { 454 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); 455 } 456 457 static ssize_t nvmet_ns_enable_store(struct config_item *item, 458 const char *page, size_t count) 459 { 460 struct nvmet_ns *ns = to_nvmet_ns(item); 461 bool enable; 462 int ret = 0; 463 464 if (strtobool(page, &enable)) 465 return -EINVAL; 466 467 if (enable) 468 ret = nvmet_ns_enable(ns); 469 else 470 nvmet_ns_disable(ns); 471 472 return ret ? ret : count; 473 } 474 475 CONFIGFS_ATTR(nvmet_ns_, enable); 476 477 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) 478 { 479 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); 480 } 481 482 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, 483 const char *page, size_t count) 484 { 485 struct nvmet_ns *ns = to_nvmet_ns(item); 486 bool val; 487 488 if (strtobool(page, &val)) 489 return -EINVAL; 490 491 mutex_lock(&ns->subsys->lock); 492 if (ns->enabled) { 493 pr_err("disable ns before setting buffered_io value.\n"); 494 mutex_unlock(&ns->subsys->lock); 495 return -EINVAL; 496 } 497 498 ns->buffered_io = val; 499 mutex_unlock(&ns->subsys->lock); 500 return count; 501 } 502 503 CONFIGFS_ATTR(nvmet_ns_, buffered_io); 504 505 static struct configfs_attribute *nvmet_ns_attrs[] = { 506 &nvmet_ns_attr_device_path, 507 &nvmet_ns_attr_device_nguid, 508 &nvmet_ns_attr_device_uuid, 509 &nvmet_ns_attr_ana_grpid, 510 &nvmet_ns_attr_enable, 511 &nvmet_ns_attr_buffered_io, 512 NULL, 513 }; 514 515 static void nvmet_ns_release(struct config_item *item) 516 { 517 struct nvmet_ns *ns = to_nvmet_ns(item); 518 519 nvmet_ns_free(ns); 520 } 521 522 static struct configfs_item_operations nvmet_ns_item_ops = { 523 .release = nvmet_ns_release, 524 }; 525 526 static const struct config_item_type nvmet_ns_type = { 527 .ct_item_ops = &nvmet_ns_item_ops, 528 .ct_attrs = nvmet_ns_attrs, 529 .ct_owner = THIS_MODULE, 530 }; 531 532 static struct config_group *nvmet_ns_make(struct config_group *group, 533 const char *name) 534 { 535 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item); 536 struct nvmet_ns *ns; 537 int ret; 538 u32 nsid; 539 540 ret = kstrtou32(name, 0, &nsid); 541 if (ret) 542 goto out; 543 544 ret = -EINVAL; 545 if (nsid == 0 || nsid == NVME_NSID_ALL) 546 goto out; 547 548 ret = -ENOMEM; 549 ns = nvmet_ns_alloc(subsys, nsid); 550 if (!ns) 551 goto out; 552 config_group_init_type_name(&ns->group, name, &nvmet_ns_type); 553 554 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn); 555 556 return &ns->group; 557 out: 558 return ERR_PTR(ret); 559 } 560 561 static struct configfs_group_operations nvmet_namespaces_group_ops = { 562 .make_group = nvmet_ns_make, 563 }; 564 565 static const struct config_item_type nvmet_namespaces_type = { 566 .ct_group_ops = &nvmet_namespaces_group_ops, 567 .ct_owner = THIS_MODULE, 568 }; 569 570 static int nvmet_port_subsys_allow_link(struct config_item *parent, 571 struct config_item *target) 572 { 573 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 574 struct nvmet_subsys *subsys; 575 struct nvmet_subsys_link *link, *p; 576 int ret; 577 578 if (target->ci_type != &nvmet_subsys_type) { 579 pr_err("can only link subsystems into the subsystems dir.!\n"); 580 return -EINVAL; 581 } 582 subsys = to_subsys(target); 583 link = kmalloc(sizeof(*link), GFP_KERNEL); 584 if (!link) 585 return -ENOMEM; 586 link->subsys = subsys; 587 588 down_write(&nvmet_config_sem); 589 ret = -EEXIST; 590 list_for_each_entry(p, &port->subsystems, entry) { 591 if (p->subsys == subsys) 592 goto out_free_link; 593 } 594 595 if (list_empty(&port->subsystems)) { 596 ret = nvmet_enable_port(port); 597 if (ret) 598 goto out_free_link; 599 } 600 601 list_add_tail(&link->entry, &port->subsystems); 602 nvmet_genctr++; 603 up_write(&nvmet_config_sem); 604 return 0; 605 606 out_free_link: 607 up_write(&nvmet_config_sem); 608 kfree(link); 609 return ret; 610 } 611 612 static void nvmet_port_subsys_drop_link(struct config_item *parent, 613 struct config_item *target) 614 { 615 struct nvmet_port *port = to_nvmet_port(parent->ci_parent); 616 struct nvmet_subsys *subsys = to_subsys(target); 617 struct nvmet_subsys_link *p; 618 619 down_write(&nvmet_config_sem); 620 list_for_each_entry(p, &port->subsystems, entry) { 621 if (p->subsys == subsys) 622 goto found; 623 } 624 up_write(&nvmet_config_sem); 625 return; 626 627 found: 628 list_del(&p->entry); 629 nvmet_genctr++; 630 if (list_empty(&port->subsystems)) 631 nvmet_disable_port(port); 632 up_write(&nvmet_config_sem); 633 kfree(p); 634 } 635 636 static struct configfs_item_operations nvmet_port_subsys_item_ops = { 637 .allow_link = nvmet_port_subsys_allow_link, 638 .drop_link = nvmet_port_subsys_drop_link, 639 }; 640 641 static const struct config_item_type nvmet_port_subsys_type = { 642 .ct_item_ops = &nvmet_port_subsys_item_ops, 643 .ct_owner = THIS_MODULE, 644 }; 645 646 static int nvmet_allowed_hosts_allow_link(struct config_item *parent, 647 struct config_item *target) 648 { 649 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 650 struct nvmet_host *host; 651 struct nvmet_host_link *link, *p; 652 int ret; 653 654 if (target->ci_type != &nvmet_host_type) { 655 pr_err("can only link hosts into the allowed_hosts directory!\n"); 656 return -EINVAL; 657 } 658 659 host = to_host(target); 660 link = kmalloc(sizeof(*link), GFP_KERNEL); 661 if (!link) 662 return -ENOMEM; 663 link->host = host; 664 665 down_write(&nvmet_config_sem); 666 ret = -EINVAL; 667 if (subsys->allow_any_host) { 668 pr_err("can't add hosts when allow_any_host is set!\n"); 669 goto out_free_link; 670 } 671 672 ret = -EEXIST; 673 list_for_each_entry(p, &subsys->hosts, entry) { 674 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 675 goto out_free_link; 676 } 677 list_add_tail(&link->entry, &subsys->hosts); 678 nvmet_genctr++; 679 up_write(&nvmet_config_sem); 680 return 0; 681 out_free_link: 682 up_write(&nvmet_config_sem); 683 kfree(link); 684 return ret; 685 } 686 687 static void nvmet_allowed_hosts_drop_link(struct config_item *parent, 688 struct config_item *target) 689 { 690 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent); 691 struct nvmet_host *host = to_host(target); 692 struct nvmet_host_link *p; 693 694 down_write(&nvmet_config_sem); 695 list_for_each_entry(p, &subsys->hosts, entry) { 696 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host))) 697 goto found; 698 } 699 up_write(&nvmet_config_sem); 700 return; 701 702 found: 703 list_del(&p->entry); 704 nvmet_genctr++; 705 up_write(&nvmet_config_sem); 706 kfree(p); 707 } 708 709 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = { 710 .allow_link = nvmet_allowed_hosts_allow_link, 711 .drop_link = nvmet_allowed_hosts_drop_link, 712 }; 713 714 static const struct config_item_type nvmet_allowed_hosts_type = { 715 .ct_item_ops = &nvmet_allowed_hosts_item_ops, 716 .ct_owner = THIS_MODULE, 717 }; 718 719 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item, 720 char *page) 721 { 722 return snprintf(page, PAGE_SIZE, "%d\n", 723 to_subsys(item)->allow_any_host); 724 } 725 726 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item, 727 const char *page, size_t count) 728 { 729 struct nvmet_subsys *subsys = to_subsys(item); 730 bool allow_any_host; 731 int ret = 0; 732 733 if (strtobool(page, &allow_any_host)) 734 return -EINVAL; 735 736 down_write(&nvmet_config_sem); 737 if (allow_any_host && !list_empty(&subsys->hosts)) { 738 pr_err("Can't set allow_any_host when explicit hosts are set!\n"); 739 ret = -EINVAL; 740 goto out_unlock; 741 } 742 743 subsys->allow_any_host = allow_any_host; 744 out_unlock: 745 up_write(&nvmet_config_sem); 746 return ret ? ret : count; 747 } 748 749 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); 750 751 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, 752 char *page) 753 { 754 struct nvmet_subsys *subsys = to_subsys(item); 755 756 if (NVME_TERTIARY(subsys->ver)) 757 return snprintf(page, PAGE_SIZE, "%d.%d.%d\n", 758 (int)NVME_MAJOR(subsys->ver), 759 (int)NVME_MINOR(subsys->ver), 760 (int)NVME_TERTIARY(subsys->ver)); 761 else 762 return snprintf(page, PAGE_SIZE, "%d.%d\n", 763 (int)NVME_MAJOR(subsys->ver), 764 (int)NVME_MINOR(subsys->ver)); 765 } 766 767 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, 768 const char *page, size_t count) 769 { 770 struct nvmet_subsys *subsys = to_subsys(item); 771 int major, minor, tertiary = 0; 772 int ret; 773 774 775 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary); 776 if (ret != 2 && ret != 3) 777 return -EINVAL; 778 779 down_write(&nvmet_config_sem); 780 subsys->ver = NVME_VS(major, minor, tertiary); 781 up_write(&nvmet_config_sem); 782 783 return count; 784 } 785 CONFIGFS_ATTR(nvmet_subsys_, attr_version); 786 787 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, 788 char *page) 789 { 790 struct nvmet_subsys *subsys = to_subsys(item); 791 792 return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial); 793 } 794 795 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, 796 const char *page, size_t count) 797 { 798 struct nvmet_subsys *subsys = to_subsys(item); 799 800 down_write(&nvmet_config_sem); 801 sscanf(page, "%llx\n", &subsys->serial); 802 up_write(&nvmet_config_sem); 803 804 return count; 805 } 806 CONFIGFS_ATTR(nvmet_subsys_, attr_serial); 807 808 static struct configfs_attribute *nvmet_subsys_attrs[] = { 809 &nvmet_subsys_attr_attr_allow_any_host, 810 &nvmet_subsys_attr_attr_version, 811 &nvmet_subsys_attr_attr_serial, 812 NULL, 813 }; 814 815 /* 816 * Subsystem structures & folder operation functions below 817 */ 818 static void nvmet_subsys_release(struct config_item *item) 819 { 820 struct nvmet_subsys *subsys = to_subsys(item); 821 822 nvmet_subsys_del_ctrls(subsys); 823 nvmet_subsys_put(subsys); 824 } 825 826 static struct configfs_item_operations nvmet_subsys_item_ops = { 827 .release = nvmet_subsys_release, 828 }; 829 830 static const struct config_item_type nvmet_subsys_type = { 831 .ct_item_ops = &nvmet_subsys_item_ops, 832 .ct_attrs = nvmet_subsys_attrs, 833 .ct_owner = THIS_MODULE, 834 }; 835 836 static struct config_group *nvmet_subsys_make(struct config_group *group, 837 const char *name) 838 { 839 struct nvmet_subsys *subsys; 840 841 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) { 842 pr_err("can't create discovery subsystem through configfs\n"); 843 return ERR_PTR(-EINVAL); 844 } 845 846 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME); 847 if (!subsys) 848 return ERR_PTR(-ENOMEM); 849 850 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type); 851 852 config_group_init_type_name(&subsys->namespaces_group, 853 "namespaces", &nvmet_namespaces_type); 854 configfs_add_default_group(&subsys->namespaces_group, &subsys->group); 855 856 config_group_init_type_name(&subsys->allowed_hosts_group, 857 "allowed_hosts", &nvmet_allowed_hosts_type); 858 configfs_add_default_group(&subsys->allowed_hosts_group, 859 &subsys->group); 860 861 return &subsys->group; 862 } 863 864 static struct configfs_group_operations nvmet_subsystems_group_ops = { 865 .make_group = nvmet_subsys_make, 866 }; 867 868 static const struct config_item_type nvmet_subsystems_type = { 869 .ct_group_ops = &nvmet_subsystems_group_ops, 870 .ct_owner = THIS_MODULE, 871 }; 872 873 static ssize_t nvmet_referral_enable_show(struct config_item *item, 874 char *page) 875 { 876 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled); 877 } 878 879 static ssize_t nvmet_referral_enable_store(struct config_item *item, 880 const char *page, size_t count) 881 { 882 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent); 883 struct nvmet_port *port = to_nvmet_port(item); 884 bool enable; 885 886 if (strtobool(page, &enable)) 887 goto inval; 888 889 if (enable) 890 nvmet_referral_enable(parent, port); 891 else 892 nvmet_referral_disable(port); 893 894 return count; 895 inval: 896 pr_err("Invalid value '%s' for enable\n", page); 897 return -EINVAL; 898 } 899 900 CONFIGFS_ATTR(nvmet_referral_, enable); 901 902 /* 903 * Discovery Service subsystem definitions 904 */ 905 static struct configfs_attribute *nvmet_referral_attrs[] = { 906 &nvmet_attr_addr_adrfam, 907 &nvmet_attr_addr_portid, 908 &nvmet_attr_addr_treq, 909 &nvmet_attr_addr_traddr, 910 &nvmet_attr_addr_trsvcid, 911 &nvmet_attr_addr_trtype, 912 &nvmet_referral_attr_enable, 913 NULL, 914 }; 915 916 static void nvmet_referral_release(struct config_item *item) 917 { 918 struct nvmet_port *port = to_nvmet_port(item); 919 920 nvmet_referral_disable(port); 921 kfree(port); 922 } 923 924 static struct configfs_item_operations nvmet_referral_item_ops = { 925 .release = nvmet_referral_release, 926 }; 927 928 static const struct config_item_type nvmet_referral_type = { 929 .ct_owner = THIS_MODULE, 930 .ct_attrs = nvmet_referral_attrs, 931 .ct_item_ops = &nvmet_referral_item_ops, 932 }; 933 934 static struct config_group *nvmet_referral_make( 935 struct config_group *group, const char *name) 936 { 937 struct nvmet_port *port; 938 939 port = kzalloc(sizeof(*port), GFP_KERNEL); 940 if (!port) 941 return ERR_PTR(-ENOMEM); 942 943 INIT_LIST_HEAD(&port->entry); 944 config_group_init_type_name(&port->group, name, &nvmet_referral_type); 945 946 return &port->group; 947 } 948 949 static struct configfs_group_operations nvmet_referral_group_ops = { 950 .make_group = nvmet_referral_make, 951 }; 952 953 static const struct config_item_type nvmet_referrals_type = { 954 .ct_owner = THIS_MODULE, 955 .ct_group_ops = &nvmet_referral_group_ops, 956 }; 957 958 static struct { 959 enum nvme_ana_state state; 960 const char *name; 961 } nvmet_ana_state_names[] = { 962 { NVME_ANA_OPTIMIZED, "optimized" }, 963 { NVME_ANA_NONOPTIMIZED, "non-optimized" }, 964 { NVME_ANA_INACCESSIBLE, "inaccessible" }, 965 { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, 966 { NVME_ANA_CHANGE, "change" }, 967 }; 968 969 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, 970 char *page) 971 { 972 struct nvmet_ana_group *grp = to_ana_group(item); 973 enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; 974 int i; 975 976 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) { 977 if (state != nvmet_ana_state_names[i].state) 978 continue; 979 return sprintf(page, "%s\n", nvmet_ana_state_names[i].name); 980 } 981 982 return sprintf(page, "\n"); 983 } 984 985 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, 986 const char *page, size_t count) 987 { 988 struct nvmet_ana_group *grp = to_ana_group(item); 989 int i; 990 991 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) { 992 if (sysfs_streq(page, nvmet_ana_state_names[i].name)) 993 goto found; 994 } 995 996 pr_err("Invalid value '%s' for ana_state\n", page); 997 return -EINVAL; 998 999 found: 1000 down_write(&nvmet_ana_sem); 1001 grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state; 1002 nvmet_ana_chgcnt++; 1003 up_write(&nvmet_ana_sem); 1004 1005 nvmet_port_send_ana_event(grp->port); 1006 return count; 1007 } 1008 1009 CONFIGFS_ATTR(nvmet_ana_group_, ana_state); 1010 1011 static struct configfs_attribute *nvmet_ana_group_attrs[] = { 1012 &nvmet_ana_group_attr_ana_state, 1013 NULL, 1014 }; 1015 1016 static void nvmet_ana_group_release(struct config_item *item) 1017 { 1018 struct nvmet_ana_group *grp = to_ana_group(item); 1019 1020 if (grp == &grp->port->ana_default_group) 1021 return; 1022 1023 down_write(&nvmet_ana_sem); 1024 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; 1025 nvmet_ana_group_enabled[grp->grpid]--; 1026 up_write(&nvmet_ana_sem); 1027 1028 nvmet_port_send_ana_event(grp->port); 1029 kfree(grp); 1030 } 1031 1032 static struct configfs_item_operations nvmet_ana_group_item_ops = { 1033 .release = nvmet_ana_group_release, 1034 }; 1035 1036 static const struct config_item_type nvmet_ana_group_type = { 1037 .ct_item_ops = &nvmet_ana_group_item_ops, 1038 .ct_attrs = nvmet_ana_group_attrs, 1039 .ct_owner = THIS_MODULE, 1040 }; 1041 1042 static struct config_group *nvmet_ana_groups_make_group( 1043 struct config_group *group, const char *name) 1044 { 1045 struct nvmet_port *port = ana_groups_to_port(&group->cg_item); 1046 struct nvmet_ana_group *grp; 1047 u32 grpid; 1048 int ret; 1049 1050 ret = kstrtou32(name, 0, &grpid); 1051 if (ret) 1052 goto out; 1053 1054 ret = -EINVAL; 1055 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) 1056 goto out; 1057 1058 ret = -ENOMEM; 1059 grp = kzalloc(sizeof(*grp), GFP_KERNEL); 1060 if (!grp) 1061 goto out; 1062 grp->port = port; 1063 grp->grpid = grpid; 1064 1065 down_write(&nvmet_ana_sem); 1066 nvmet_ana_group_enabled[grpid]++; 1067 up_write(&nvmet_ana_sem); 1068 1069 nvmet_port_send_ana_event(grp->port); 1070 1071 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); 1072 return &grp->group; 1073 out: 1074 return ERR_PTR(ret); 1075 } 1076 1077 static struct configfs_group_operations nvmet_ana_groups_group_ops = { 1078 .make_group = nvmet_ana_groups_make_group, 1079 }; 1080 1081 static const struct config_item_type nvmet_ana_groups_type = { 1082 .ct_group_ops = &nvmet_ana_groups_group_ops, 1083 .ct_owner = THIS_MODULE, 1084 }; 1085 1086 /* 1087 * Ports definitions. 1088 */ 1089 static void nvmet_port_release(struct config_item *item) 1090 { 1091 struct nvmet_port *port = to_nvmet_port(item); 1092 1093 kfree(port->ana_state); 1094 kfree(port); 1095 } 1096 1097 static struct configfs_attribute *nvmet_port_attrs[] = { 1098 &nvmet_attr_addr_adrfam, 1099 &nvmet_attr_addr_treq, 1100 &nvmet_attr_addr_traddr, 1101 &nvmet_attr_addr_trsvcid, 1102 &nvmet_attr_addr_trtype, 1103 &nvmet_attr_param_inline_data_size, 1104 NULL, 1105 }; 1106 1107 static struct configfs_item_operations nvmet_port_item_ops = { 1108 .release = nvmet_port_release, 1109 }; 1110 1111 static const struct config_item_type nvmet_port_type = { 1112 .ct_attrs = nvmet_port_attrs, 1113 .ct_item_ops = &nvmet_port_item_ops, 1114 .ct_owner = THIS_MODULE, 1115 }; 1116 1117 static struct config_group *nvmet_ports_make(struct config_group *group, 1118 const char *name) 1119 { 1120 struct nvmet_port *port; 1121 u16 portid; 1122 u32 i; 1123 1124 if (kstrtou16(name, 0, &portid)) 1125 return ERR_PTR(-EINVAL); 1126 1127 port = kzalloc(sizeof(*port), GFP_KERNEL); 1128 if (!port) 1129 return ERR_PTR(-ENOMEM); 1130 1131 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, 1132 sizeof(*port->ana_state), GFP_KERNEL); 1133 if (!port->ana_state) { 1134 kfree(port); 1135 return ERR_PTR(-ENOMEM); 1136 } 1137 1138 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { 1139 if (i == NVMET_DEFAULT_ANA_GRPID) 1140 port->ana_state[1] = NVME_ANA_OPTIMIZED; 1141 else 1142 port->ana_state[i] = NVME_ANA_INACCESSIBLE; 1143 } 1144 1145 INIT_LIST_HEAD(&port->entry); 1146 INIT_LIST_HEAD(&port->subsystems); 1147 INIT_LIST_HEAD(&port->referrals); 1148 port->inline_data_size = -1; /* < 0 == let the transport choose */ 1149 1150 port->disc_addr.portid = cpu_to_le16(portid); 1151 config_group_init_type_name(&port->group, name, &nvmet_port_type); 1152 1153 config_group_init_type_name(&port->subsys_group, 1154 "subsystems", &nvmet_port_subsys_type); 1155 configfs_add_default_group(&port->subsys_group, &port->group); 1156 1157 config_group_init_type_name(&port->referrals_group, 1158 "referrals", &nvmet_referrals_type); 1159 configfs_add_default_group(&port->referrals_group, &port->group); 1160 1161 config_group_init_type_name(&port->ana_groups_group, 1162 "ana_groups", &nvmet_ana_groups_type); 1163 configfs_add_default_group(&port->ana_groups_group, &port->group); 1164 1165 port->ana_default_group.port = port; 1166 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; 1167 config_group_init_type_name(&port->ana_default_group.group, 1168 __stringify(NVMET_DEFAULT_ANA_GRPID), 1169 &nvmet_ana_group_type); 1170 configfs_add_default_group(&port->ana_default_group.group, 1171 &port->ana_groups_group); 1172 1173 return &port->group; 1174 } 1175 1176 static struct configfs_group_operations nvmet_ports_group_ops = { 1177 .make_group = nvmet_ports_make, 1178 }; 1179 1180 static const struct config_item_type nvmet_ports_type = { 1181 .ct_group_ops = &nvmet_ports_group_ops, 1182 .ct_owner = THIS_MODULE, 1183 }; 1184 1185 static struct config_group nvmet_subsystems_group; 1186 static struct config_group nvmet_ports_group; 1187 1188 static void nvmet_host_release(struct config_item *item) 1189 { 1190 struct nvmet_host *host = to_host(item); 1191 1192 kfree(host); 1193 } 1194 1195 static struct configfs_item_operations nvmet_host_item_ops = { 1196 .release = nvmet_host_release, 1197 }; 1198 1199 static const struct config_item_type nvmet_host_type = { 1200 .ct_item_ops = &nvmet_host_item_ops, 1201 .ct_owner = THIS_MODULE, 1202 }; 1203 1204 static struct config_group *nvmet_hosts_make_group(struct config_group *group, 1205 const char *name) 1206 { 1207 struct nvmet_host *host; 1208 1209 host = kzalloc(sizeof(*host), GFP_KERNEL); 1210 if (!host) 1211 return ERR_PTR(-ENOMEM); 1212 1213 config_group_init_type_name(&host->group, name, &nvmet_host_type); 1214 1215 return &host->group; 1216 } 1217 1218 static struct configfs_group_operations nvmet_hosts_group_ops = { 1219 .make_group = nvmet_hosts_make_group, 1220 }; 1221 1222 static const struct config_item_type nvmet_hosts_type = { 1223 .ct_group_ops = &nvmet_hosts_group_ops, 1224 .ct_owner = THIS_MODULE, 1225 }; 1226 1227 static struct config_group nvmet_hosts_group; 1228 1229 static const struct config_item_type nvmet_root_type = { 1230 .ct_owner = THIS_MODULE, 1231 }; 1232 1233 static struct configfs_subsystem nvmet_configfs_subsystem = { 1234 .su_group = { 1235 .cg_item = { 1236 .ci_namebuf = "nvmet", 1237 .ci_type = &nvmet_root_type, 1238 }, 1239 }, 1240 }; 1241 1242 int __init nvmet_init_configfs(void) 1243 { 1244 int ret; 1245 1246 config_group_init(&nvmet_configfs_subsystem.su_group); 1247 mutex_init(&nvmet_configfs_subsystem.su_mutex); 1248 1249 config_group_init_type_name(&nvmet_subsystems_group, 1250 "subsystems", &nvmet_subsystems_type); 1251 configfs_add_default_group(&nvmet_subsystems_group, 1252 &nvmet_configfs_subsystem.su_group); 1253 1254 config_group_init_type_name(&nvmet_ports_group, 1255 "ports", &nvmet_ports_type); 1256 configfs_add_default_group(&nvmet_ports_group, 1257 &nvmet_configfs_subsystem.su_group); 1258 1259 config_group_init_type_name(&nvmet_hosts_group, 1260 "hosts", &nvmet_hosts_type); 1261 configfs_add_default_group(&nvmet_hosts_group, 1262 &nvmet_configfs_subsystem.su_group); 1263 1264 ret = configfs_register_subsystem(&nvmet_configfs_subsystem); 1265 if (ret) { 1266 pr_err("configfs_register_subsystem: %d\n", ret); 1267 return ret; 1268 } 1269 1270 return 0; 1271 } 1272 1273 void __exit nvmet_exit_configfs(void) 1274 { 1275 configfs_unregister_subsystem(&nvmet_configfs_subsystem); 1276 } 1277