// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 ****************************************************************************/

#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"

#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;

static unsigned int target_devices;
static DEFINE_MUTEX(target_devices_lock);

static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);

char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];

static ssize_t target_core_item_dbroot_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", db_root);
}

static ssize_t target_core_item_dbroot_store(struct config_item *item,
		const char *page, size_t count)
{
	ssize_t read_bytes;
	struct file *fp;
	ssize_t r = -EINVAL;

	mutex_lock(&target_devices_lock);
	if (target_devices) {
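		/*
		 * Once any backstore device has been created, db_root is
		 * considered in use and may no longer be re-pointed.
		 */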
pr_err("db_root: cannot be changed because it's in use\n"); 117 goto unlock; 118 } 119 120 if (count > (DB_ROOT_LEN - 1)) { 121 pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n", 122 (int)count, DB_ROOT_LEN - 1); 123 goto unlock; 124 } 125 126 read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page); 127 if (!read_bytes) 128 goto unlock; 129 130 if (db_root_stage[read_bytes - 1] == '\n') 131 db_root_stage[read_bytes - 1] = '\0'; 132 133 /* validate new db root before accepting it */ 134 fp = filp_open(db_root_stage, O_RDONLY, 0); 135 if (IS_ERR(fp)) { 136 pr_err("db_root: cannot open: %s\n", db_root_stage); 137 goto unlock; 138 } 139 if (!S_ISDIR(file_inode(fp)->i_mode)) { 140 filp_close(fp, NULL); 141 pr_err("db_root: not a directory: %s\n", db_root_stage); 142 goto unlock; 143 } 144 filp_close(fp, NULL); 145 146 strncpy(db_root, db_root_stage, read_bytes); 147 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root); 148 149 r = read_bytes; 150 151 unlock: 152 mutex_unlock(&target_devices_lock); 153 return r; 154 } 155 156 CONFIGFS_ATTR(target_core_item_, dbroot); 157 158 static struct target_fabric_configfs *target_core_get_fabric( 159 const char *name) 160 { 161 struct target_fabric_configfs *tf; 162 163 if (!name) 164 return NULL; 165 166 mutex_lock(&g_tf_lock); 167 list_for_each_entry(tf, &g_tf_list, tf_list) { 168 const char *cmp_name = tf->tf_ops->fabric_alias; 169 if (!cmp_name) 170 cmp_name = tf->tf_ops->fabric_name; 171 if (!strcmp(cmp_name, name)) { 172 atomic_inc(&tf->tf_access_cnt); 173 mutex_unlock(&g_tf_lock); 174 return tf; 175 } 176 } 177 mutex_unlock(&g_tf_lock); 178 179 return NULL; 180 } 181 182 /* 183 * Called from struct target_core_group_ops->make_group() 184 */ 185 static struct config_group *target_core_register_fabric( 186 struct config_group *group, 187 const char *name) 188 { 189 struct target_fabric_configfs *tf; 190 int ret; 191 192 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" 193 " %s\n", group, name); 194 195 tf = target_core_get_fabric(name); 196 if (!tf) { 197 pr_debug("target_core_register_fabric() trying autoload for %s\n", 198 name); 199 200 /* 201 * Below are some hardcoded request_module() calls to automatically 202 * local fabric modules when the following is called: 203 * 204 * mkdir -p /sys/kernel/config/target/$MODULE_NAME 205 * 206 * Note that this does not limit which TCM fabric module can be 207 * registered, but simply provids auto loading logic for modules with 208 * mkdir(2) system calls with known TCM fabric modules. 
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}

/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
		" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};

int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);

/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/

static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (tfo->fabric_alias) {
		if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
			pr_err("Passed alias: %s exceeds "
			       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
			return -EINVAL;
		}
	}
	if (!tfo->fabric_name) {
		pr_err("Missing tfo->fabric_name\n");
		return -EINVAL;
	}
	if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds "
		       "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	if (!tfo->check_stop_free) {
		pr_err("Missing tfo->check_stop_free()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}

int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);

void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t->tf_tpg_base_cit.ct_attrs);
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);

/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}

/* Start functions for struct config_item_type tb_dev_attrib_cit */

#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}

DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);

#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = val;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);

#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = kstrtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = flag;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);

#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= INQUIRY_MODEL_LEN) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 15 characters\n", dev,
			configname);
	}
	/*
	 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
	 * here without potentially breaking existing setups, so continue to
	 * truncate one byte shorter than what can be carried in INQUIRY.
	 */
	strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
}

static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
			sizeof(dev->t10_wwn.model));
	}
	da->emulate_model_alias = flag;
	return count;
}

static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}

static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != TARGET_UA_INTLCK_CTRL_CLEAR
	 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
	 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}

static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
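	/*
	 * TAS: Task Aborted Status. When enabled, commands aborted on one
	 * I_T nexus are completed to other initiators with TASK ABORTED
	 * status (the SAM TAS semantics advertised via the Control mode
	 * page).
	 */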
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}

static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		if (!dev->transport->configure_unmap ||
		    !dev->transport->configure_unmap(dev)) {
			pr_err("Generic Block Discard not supported\n");
			return -ENOSYS;
		}
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}

static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		if (!dev->transport->configure_unmap ||
		    !dev->transport->configure_unmap(dev)) {
			pr_err("Generic Block Discard not supported\n");
			return -ENOSYS;
		}
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				da->da_dev, flag);
	return count;
}

static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}

/* always zero, but attr needs to remain RW to avoid userspace breakage */
static ssize_t pi_prot_format_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "0\n");
}

static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}

static ssize_t pi_prot_verify_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag) {
		da->pi_prot_verify = flag;
		return count;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!da->pi_prot_type) {
		pr_warn("DIF protection not supported by backend, ignoring\n");
		return count;
	}
	da->pi_prot_verify = flag;

	return count;
}

static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}

static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}

static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" unmap_zeroes_data while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		if (!dev->transport->configure_unmap ||
		    !dev->transport->configure_unmap(dev)) {
			pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
			       da->da_dev);
			return -ENOSYS;
		}
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, val,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}

static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n",
			da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}

static ssize_t block_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;
	if (da->max_bytes_per_io)
		da->hw_max_sectors = da->max_bytes_per_io / val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			da->da_dev, val);
	return count;
}

static ssize_t alua_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}

static ssize_t alua_support_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag, oldflag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
	if (flag == oldflag)
		return count;

	if (!(dev->transport->transport_flags_changeable &
	      TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
		pr_err("dev[%p]: Unable to change SE Device alua_support:"
			" alua_support has fixed value\n", dev);
		return -ENOSYS;
	}

	if (flag)
		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
	else
		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
	return count;
}

static ssize_t pgr_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}

static ssize_t pgr_support_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag, oldflag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
	if (flag == oldflag)
		return count;

	if (!(dev->transport->transport_flags_changeable &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		pr_err("dev[%p]: Unable to change SE Device pgr_support:"
			" pgr_support has fixed value\n", dev);
		return -ENOSYS;
	}

	if (flag)
		dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
	else
		dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
	return count;
}

static ssize_t emulate_rsoc_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = kstrtobool(page, &flag);
	if (ret < 0)
		return ret;

	da->emulate_rsoc = flag;
	pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}

CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, emulate_rsoc);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR(, alua_support);
CONFIGFS_ATTR(, pgr_support);

/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	&attr_emulate_rsoc,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);

/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_emulate_pr,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);

/*
 * pr related dev_attrib attributes for devices passing through CDBs,
 * but allowing in core pr emulation.
 */
struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
	&attr_enforce_pr_isids,
	&attr_force_pr_aptpl,
	NULL,
};
EXPORT_SYMBOL(passthrough_pr_attrib_attrs);

TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */

static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}

static ssize_t target_check_inquiry_data(char *buf)
{
	size_t len;
	int i;

	len = strlen(buf);

	/*
	 * SPC 4.3.1:
	 * ASCII data fields shall contain only ASCII printable characters
	 * (i.e., code values 20h to 7Eh) and may be terminated with one or
	 * more ASCII null (00h) characters.
	 */
	for (i = 0; i < len; i++) {
		if (buf[i] < 0x20 || buf[i] > 0x7E) {
			pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
			return -EINVAL;
		}
	}

	return len;
}

/*
 * STANDARD and VPD page 0x83 T10 Vendor Identification
 */
static ssize_t target_wwn_vendor_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
}

static ssize_t target_wwn_vendor_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_VENDOR_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_VENDOR_LEN) {
		pr_err("Emulated T10 Vendor Identification exceeds"
			" INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Vendor Identification while"
			" active %d exports exist\n", dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
	strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
		 " %s\n", dev->t10_wwn.vendor);

	return count;
}

static ssize_t target_wwn_product_id_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
}

static ssize_t target_wwn_product_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_MODEL_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_MODEL_LEN) {
		pr_err("Emulated T10 Model exceeds INQUIRY_MODEL_LEN: "
			 __stringify(INQUIRY_MODEL_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Model while active %d exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
	strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
		 dev->t10_wwn.model);

	return count;
}

static ssize_t target_wwn_revision_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
}

static ssize_t target_wwn_revision_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	/* +2 to allow for a trailing (stripped) '\n' and null-terminator */
	unsigned char buf[INQUIRY_REVISION_LEN + 2];
	char *stripped = NULL;
	size_t len;
	ssize_t ret;

	len = strlcpy(buf, page, sizeof(buf));
	if (len < sizeof(buf)) {
		/* Strip any newline added from userspace. */
		stripped = strstrip(buf);
		len = strlen(stripped);
	}
	if (len > INQUIRY_REVISION_LEN) {
		pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
			 __stringify(INQUIRY_REVISION_LEN)
			"\n");
		return -EOVERFLOW;
	}

	ret = target_check_inquiry_data(stripped);

	if (ret < 0)
		return ret;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set T10 Revision while active %d exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
	strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision));

	pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
		 dev->t10_wwn.revision);

	return count;
}

static ssize_t
target_wwn_company_id_show(struct config_item *item,
				char *page)
{
	return snprintf(page, PAGE_SIZE, "%#08x\n",
			to_t10_wwn(item)->company_id);
}

static ssize_t
target_wwn_company_id_store(struct config_item *item,
				const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	u32 val;
	int ret;

	/*
	 * The IEEE COMPANY_ID field should contain a 24-bit canonical
	 * form OUI assigned by the IEEE.
	 */
	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0x1000000)
		return -EOVERFLOW;

	/*
	 * Check to see if any active exports exist.  If they do exist, fail
	 * here as changing this information on the fly (underneath the
	 * initiator side OS dependent multipath code) could cause negative
	 * effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set Company ID while %u exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	t10_wwn->company_id = val;

	pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
		 t10_wwn->company_id);

	return count;
}

/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&to_t10_wwn(item)->unit_serial[0]);
}

static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
			" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}

/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
		char *page)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE] = { };
	ssize_t len = 0;

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}

/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
		char *page)						\
{									\
	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}

/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, product_id);
CONFIGFS_ATTR(target_wwn_, revision);
CONFIGFS_ATTR(target_wwn_, company_id);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);

static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vendor_id,
	&target_wwn_attr_product_id,
	&target_wwn_attr_revision,
	&target_wwn_attr_company_id,
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);

/* End functions for struct config_item_type tb_dev_wwn_cit */

/* Start functions for struct config_item_type tb_dev_pr_cit */

static struct se_device *pr_to_dev(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device,
			dev_pr_group);
}

static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN] = { };

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
		se_nacl->initiatorname, i_buf);
}

static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_session *sess = dev->reservation_holder;
	struct se_node_acl *se_nacl;
	ssize_t len;

	if (sess) {
		se_nacl = sess->se_node_acl;
		len = sprintf(page,
			"SPC-2 Reservation: %s Initiator: %s\n",
			se_nacl->se_tpg->se_tpg_tfo->fabric_name,
			se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}

static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	int ret;

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}

static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "0x%08x\n",
			pr_to_dev(item)->t10_pr.pr_generation);
}

static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->fabric_name,
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}

static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");

	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}

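/*
 * APTPL: the SPC-3 "Activate Persist Through Power Loss" bit from
 * PERSISTENT RESERVE OUT. The attributes below report whether APTPL
 * metadata handling is active and accept pre-formatted registration
 * metadata so reservations can be restored after a power loss or reboot.
 */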
1945 static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, 1946 char *page) 1947 { 1948 struct se_device *dev = pr_to_dev(item); 1949 1950 if (!dev->dev_attrib.emulate_pr || 1951 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 1952 return 0; 1953 1954 return sprintf(page, "APTPL Bit Status: %s\n", 1955 (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); 1956 } 1957 1958 static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item, 1959 char *page) 1960 { 1961 struct se_device *dev = pr_to_dev(item); 1962 1963 if (!dev->dev_attrib.emulate_pr || 1964 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 1965 return 0; 1966 1967 return sprintf(page, "Ready to process PR APTPL metadata..\n"); 1968 } 1969 1970 enum { 1971 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, 1972 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, 1973 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, 1974 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err 1975 }; 1976 1977 static match_table_t tokens = { 1978 {Opt_initiator_fabric, "initiator_fabric=%s"}, 1979 {Opt_initiator_node, "initiator_node=%s"}, 1980 {Opt_initiator_sid, "initiator_sid=%s"}, 1981 {Opt_sa_res_key, "sa_res_key=%s"}, 1982 {Opt_res_holder, "res_holder=%d"}, 1983 {Opt_res_type, "res_type=%d"}, 1984 {Opt_res_scope, "res_scope=%d"}, 1985 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, 1986 {Opt_mapped_lun, "mapped_lun=%u"}, 1987 {Opt_target_fabric, "target_fabric=%s"}, 1988 {Opt_target_node, "target_node=%s"}, 1989 {Opt_tpgt, "tpgt=%d"}, 1990 {Opt_port_rtpi, "port_rtpi=%d"}, 1991 {Opt_target_lun, "target_lun=%u"}, 1992 {Opt_err, NULL} 1993 }; 1994 1995 static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item, 1996 const char *page, size_t count) 1997 { 1998 struct se_device *dev = pr_to_dev(item); 1999 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 2000 unsigned char *t_fabric = NULL, *t_port = NULL; 2001 char *orig, *ptr, *opts; 2002 substring_t args[MAX_OPT_ARGS]; 2003 unsigned long long tmp_ll; 2004 u64 sa_res_key = 0; 2005 u64 mapped_lun = 0, target_lun = 0; 2006 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; 2007 u16 tpgt = 0; 2008 u8 type = 0; 2009 2010 if (!dev->dev_attrib.emulate_pr || 2011 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 2012 return count; 2013 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 2014 return count; 2015 2016 if (dev->export_count) { 2017 pr_debug("Unable to process APTPL metadata while" 2018 " active fabric exports exist\n"); 2019 return -EINVAL; 2020 } 2021 2022 opts = kstrdup(page, GFP_KERNEL); 2023 if (!opts) 2024 return -ENOMEM; 2025 2026 orig = opts; 2027 while ((ptr = strsep(&opts, ",\n")) != NULL) { 2028 if (!*ptr) 2029 continue; 2030 2031 token = match_token(ptr, tokens, args); 2032 switch (token) { 2033 case Opt_initiator_fabric: 2034 i_fabric = match_strdup(args); 2035 if (!i_fabric) { 2036 ret = -ENOMEM; 2037 goto out; 2038 } 2039 break; 2040 case Opt_initiator_node: 2041 i_port = match_strdup(args); 2042 if (!i_port) { 2043 ret = -ENOMEM; 2044 goto out; 2045 } 2046 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { 2047 pr_err("APTPL metadata initiator_node=" 2048 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 2049 PR_APTPL_MAX_IPORT_LEN); 2050 ret = -EINVAL; 2051 break; 2052 } 2053 break; 2054 case Opt_initiator_sid: 2055 isid = match_strdup(args); 2056 if (!isid) { 2057 ret = -ENOMEM; 2058 goto out; 2059 } 2060 if (strlen(isid) >= PR_REG_ISID_LEN) { 2061 
pr_err("APTPL metadata initiator_isid" 2062 "= exceeds PR_REG_ISID_LEN: %d\n", 2063 PR_REG_ISID_LEN); 2064 ret = -EINVAL; 2065 break; 2066 } 2067 break; 2068 case Opt_sa_res_key: 2069 ret = match_u64(args, &tmp_ll); 2070 if (ret < 0) { 2071 pr_err("kstrtoull() failed for sa_res_key=\n"); 2072 goto out; 2073 } 2074 sa_res_key = (u64)tmp_ll; 2075 break; 2076 /* 2077 * PR APTPL Metadata for Reservation 2078 */ 2079 case Opt_res_holder: 2080 ret = match_int(args, &arg); 2081 if (ret) 2082 goto out; 2083 res_holder = arg; 2084 break; 2085 case Opt_res_type: 2086 ret = match_int(args, &arg); 2087 if (ret) 2088 goto out; 2089 type = (u8)arg; 2090 break; 2091 case Opt_res_scope: 2092 ret = match_int(args, &arg); 2093 if (ret) 2094 goto out; 2095 break; 2096 case Opt_res_all_tg_pt: 2097 ret = match_int(args, &arg); 2098 if (ret) 2099 goto out; 2100 all_tg_pt = (int)arg; 2101 break; 2102 case Opt_mapped_lun: 2103 ret = match_u64(args, &tmp_ll); 2104 if (ret) 2105 goto out; 2106 mapped_lun = (u64)tmp_ll; 2107 break; 2108 /* 2109 * PR APTPL Metadata for Target Port 2110 */ 2111 case Opt_target_fabric: 2112 t_fabric = match_strdup(args); 2113 if (!t_fabric) { 2114 ret = -ENOMEM; 2115 goto out; 2116 } 2117 break; 2118 case Opt_target_node: 2119 t_port = match_strdup(args); 2120 if (!t_port) { 2121 ret = -ENOMEM; 2122 goto out; 2123 } 2124 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { 2125 pr_err("APTPL metadata target_node=" 2126 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 2127 PR_APTPL_MAX_TPORT_LEN); 2128 ret = -EINVAL; 2129 break; 2130 } 2131 break; 2132 case Opt_tpgt: 2133 ret = match_int(args, &arg); 2134 if (ret) 2135 goto out; 2136 tpgt = (u16)arg; 2137 break; 2138 case Opt_port_rtpi: 2139 ret = match_int(args, &arg); 2140 if (ret) 2141 goto out; 2142 break; 2143 case Opt_target_lun: 2144 ret = match_u64(args, &tmp_ll); 2145 if (ret) 2146 goto out; 2147 target_lun = (u64)tmp_ll; 2148 break; 2149 default: 2150 break; 2151 } 2152 } 2153 2154 if (!i_port || !t_port || !sa_res_key) { 2155 pr_err("Illegal parameters for APTPL registration\n"); 2156 ret = -EINVAL; 2157 goto out; 2158 } 2159 2160 if (res_holder && !(type)) { 2161 pr_err("Illegal PR type: 0x%02x for reservation" 2162 " holder\n", type); 2163 ret = -EINVAL; 2164 goto out; 2165 } 2166 2167 ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key, 2168 i_port, isid, mapped_lun, t_port, tpgt, target_lun, 2169 res_holder, all_tg_pt, type); 2170 out: 2171 kfree(i_fabric); 2172 kfree(i_port); 2173 kfree(isid); 2174 kfree(t_fabric); 2175 kfree(t_port); 2176 kfree(orig); 2177 return (ret == 0) ? 
count : ret; 2178 } 2179 2180 2181 CONFIGFS_ATTR_RO(target_pr_, res_holder); 2182 CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts); 2183 CONFIGFS_ATTR_RO(target_pr_, res_pr_generation); 2184 CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port); 2185 CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts); 2186 CONFIGFS_ATTR_RO(target_pr_, res_pr_type); 2187 CONFIGFS_ATTR_RO(target_pr_, res_type); 2188 CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active); 2189 CONFIGFS_ATTR(target_pr_, res_aptpl_metadata); 2190 2191 static struct configfs_attribute *target_core_dev_pr_attrs[] = { 2192 &target_pr_attr_res_holder, 2193 &target_pr_attr_res_pr_all_tgt_pts, 2194 &target_pr_attr_res_pr_generation, 2195 &target_pr_attr_res_pr_holder_tg_port, 2196 &target_pr_attr_res_pr_registered_i_pts, 2197 &target_pr_attr_res_pr_type, 2198 &target_pr_attr_res_type, 2199 &target_pr_attr_res_aptpl_active, 2200 &target_pr_attr_res_aptpl_metadata, 2201 NULL, 2202 }; 2203 2204 TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs); 2205 2206 /* End functions for struct config_item_type tb_dev_pr_cit */ 2207 2208 /* Start functions for struct config_item_type tb_dev_cit */ 2209 2210 static inline struct se_device *to_device(struct config_item *item) 2211 { 2212 return container_of(to_config_group(item), struct se_device, dev_group); 2213 } 2214 2215 static ssize_t target_dev_info_show(struct config_item *item, char *page) 2216 { 2217 struct se_device *dev = to_device(item); 2218 int bl = 0; 2219 ssize_t read_bytes = 0; 2220 2221 transport_dump_dev_state(dev, page, &bl); 2222 read_bytes += bl; 2223 read_bytes += dev->transport->show_configfs_dev_params(dev, 2224 page+read_bytes); 2225 return read_bytes; 2226 } 2227 2228 static ssize_t target_dev_control_store(struct config_item *item, 2229 const char *page, size_t count) 2230 { 2231 struct se_device *dev = to_device(item); 2232 2233 return dev->transport->set_configfs_dev_params(dev, page, count); 2234 } 2235 2236 static ssize_t target_dev_alias_show(struct config_item *item, char *page) 2237 { 2238 struct se_device *dev = to_device(item); 2239 2240 if (!(dev->dev_flags & DF_USING_ALIAS)) 2241 return 0; 2242 2243 return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); 2244 } 2245 2246 static ssize_t target_dev_alias_store(struct config_item *item, 2247 const char *page, size_t count) 2248 { 2249 struct se_device *dev = to_device(item); 2250 struct se_hba *hba = dev->se_hba; 2251 ssize_t read_bytes; 2252 2253 if (count > (SE_DEV_ALIAS_LEN-1)) { 2254 pr_err("alias count: %d exceeds" 2255 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, 2256 SE_DEV_ALIAS_LEN-1); 2257 return -EINVAL; 2258 } 2259 2260 read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); 2261 if (!read_bytes) 2262 return -EINVAL; 2263 if (dev->dev_alias[read_bytes - 1] == '\n') 2264 dev->dev_alias[read_bytes - 1] = '\0'; 2265 2266 dev->dev_flags |= DF_USING_ALIAS; 2267 2268 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 2269 config_item_name(&hba->hba_group.cg_item), 2270 config_item_name(&dev->dev_group.cg_item), 2271 dev->dev_alias); 2272 2273 return read_bytes; 2274 } 2275 2276 static ssize_t target_dev_udev_path_show(struct config_item *item, char *page) 2277 { 2278 struct se_device *dev = to_device(item); 2279 2280 if (!(dev->dev_flags & DF_USING_UDEV_PATH)) 2281 return 0; 2282 2283 return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); 2284 } 2285 2286 static ssize_t target_dev_udev_path_store(struct config_item *item, 2287 const char *page, size_t count) 2288 { 2289 struct 
se_device *dev = to_device(item); 2290 struct se_hba *hba = dev->se_hba; 2291 ssize_t read_bytes; 2292 2293 if (count > (SE_UDEV_PATH_LEN-1)) { 2294 pr_err("udev_path count: %d exceeds" 2295 " SE_UDEV_PATH_LEN-1: %u\n", (int)count, 2296 SE_UDEV_PATH_LEN-1); 2297 return -EINVAL; 2298 } 2299 2300 read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN, 2301 "%s", page); 2302 if (!read_bytes) 2303 return -EINVAL; 2304 if (dev->udev_path[read_bytes - 1] == '\n') 2305 dev->udev_path[read_bytes - 1] = '\0'; 2306 2307 dev->dev_flags |= DF_USING_UDEV_PATH; 2308 2309 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 2310 config_item_name(&hba->hba_group.cg_item), 2311 config_item_name(&dev->dev_group.cg_item), 2312 dev->udev_path); 2313 2314 return read_bytes; 2315 } 2316 2317 static ssize_t target_dev_enable_show(struct config_item *item, char *page) 2318 { 2319 struct se_device *dev = to_device(item); 2320 2321 return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev)); 2322 } 2323 2324 static ssize_t target_dev_enable_store(struct config_item *item, 2325 const char *page, size_t count) 2326 { 2327 struct se_device *dev = to_device(item); 2328 char *ptr; 2329 int ret; 2330 2331 ptr = strstr(page, "1"); 2332 if (!ptr) { 2333 pr_err("For dev_enable ops, only valid value" 2334 " is \"1\"\n"); 2335 return -EINVAL; 2336 } 2337 2338 ret = target_configure_device(dev); 2339 if (ret) 2340 return ret; 2341 return count; 2342 } 2343 2344 static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page) 2345 { 2346 struct se_device *dev = to_device(item); 2347 struct config_item *lu_ci; 2348 struct t10_alua_lu_gp *lu_gp; 2349 struct t10_alua_lu_gp_member *lu_gp_mem; 2350 ssize_t len = 0; 2351 2352 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2353 if (!lu_gp_mem) 2354 return 0; 2355 2356 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2357 lu_gp = lu_gp_mem->lu_gp; 2358 if (lu_gp) { 2359 lu_ci = &lu_gp->lu_gp_group.cg_item; 2360 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", 2361 config_item_name(lu_ci), lu_gp->lu_gp_id); 2362 } 2363 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2364 2365 return len; 2366 } 2367 2368 static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, 2369 const char *page, size_t count) 2370 { 2371 struct se_device *dev = to_device(item); 2372 struct se_hba *hba = dev->se_hba; 2373 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 2374 struct t10_alua_lu_gp_member *lu_gp_mem; 2375 unsigned char buf[LU_GROUP_NAME_BUF] = { }; 2376 int move = 0; 2377 2378 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2379 if (!lu_gp_mem) 2380 return count; 2381 2382 if (count > LU_GROUP_NAME_BUF) { 2383 pr_err("ALUA LU Group Alias too large!\n"); 2384 return -EINVAL; 2385 } 2386 memcpy(buf, page, count); 2387 /* 2388 * Any ALUA logical unit alias besides "NULL" means we will be 2389 * making a new group association. 2390 */ 2391 if (strcmp(strstrip(buf), "NULL")) { 2392 /* 2393 * core_alua_get_lu_gp_by_name() will increment reference to 2394 * struct t10_alua_lu_gp. This reference is released with 2395 * core_alua_get_lu_gp_by_name below(). 
2396 */ 2397 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); 2398 if (!lu_gp_new) 2399 return -ENODEV; 2400 } 2401 2402 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2403 lu_gp = lu_gp_mem->lu_gp; 2404 if (lu_gp) { 2405 /* 2406 * Clearing an existing lu_gp association, and replacing 2407 * with NULL 2408 */ 2409 if (!lu_gp_new) { 2410 pr_debug("Target_Core_ConfigFS: Releasing %s/%s" 2411 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 2412 " %hu\n", 2413 config_item_name(&hba->hba_group.cg_item), 2414 config_item_name(&dev->dev_group.cg_item), 2415 config_item_name(&lu_gp->lu_gp_group.cg_item), 2416 lu_gp->lu_gp_id); 2417 2418 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); 2419 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2420 2421 return count; 2422 } 2423 /* 2424 * Removing existing association of lu_gp_mem with lu_gp 2425 */ 2426 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); 2427 move = 1; 2428 } 2429 /* 2430 * Associate lu_gp_mem with lu_gp_new. 2431 */ 2432 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); 2433 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2434 2435 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" 2436 " core/alua/lu_gps/%s, ID: %hu\n", 2437 (move) ? "Moving" : "Adding", 2438 config_item_name(&hba->hba_group.cg_item), 2439 config_item_name(&dev->dev_group.cg_item), 2440 config_item_name(&lu_gp_new->lu_gp_group.cg_item), 2441 lu_gp_new->lu_gp_id); 2442 2443 core_alua_put_lu_gp_from_name(lu_gp_new); 2444 return count; 2445 } 2446 2447 static ssize_t target_dev_lba_map_show(struct config_item *item, char *page) 2448 { 2449 struct se_device *dev = to_device(item); 2450 struct t10_alua_lba_map *map; 2451 struct t10_alua_lba_map_member *mem; 2452 char *b = page; 2453 int bl = 0; 2454 char state; 2455 2456 spin_lock(&dev->t10_alua.lba_map_lock); 2457 if (!list_empty(&dev->t10_alua.lba_map_list)) 2458 bl += sprintf(b + bl, "%u %u\n", 2459 dev->t10_alua.lba_map_segment_size, 2460 dev->t10_alua.lba_map_segment_multiplier); 2461 list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { 2462 bl += sprintf(b + bl, "%llu %llu", 2463 map->lba_map_first_lba, map->lba_map_last_lba); 2464 list_for_each_entry(mem, &map->lba_map_mem_list, 2465 lba_map_mem_list) { 2466 switch (mem->lba_map_mem_alua_state) { 2467 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 2468 state = 'O'; 2469 break; 2470 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 2471 state = 'A'; 2472 break; 2473 case ALUA_ACCESS_STATE_STANDBY: 2474 state = 'S'; 2475 break; 2476 case ALUA_ACCESS_STATE_UNAVAILABLE: 2477 state = 'U'; 2478 break; 2479 default: 2480 state = '.'; 2481 break; 2482 } 2483 bl += sprintf(b + bl, " %d:%c", 2484 mem->lba_map_mem_alua_pg_id, state); 2485 } 2486 bl += sprintf(b + bl, "\n"); 2487 } 2488 spin_unlock(&dev->t10_alua.lba_map_lock); 2489 return bl; 2490 } 2491 2492 static ssize_t target_dev_lba_map_store(struct config_item *item, 2493 const char *page, size_t count) 2494 { 2495 struct se_device *dev = to_device(item); 2496 struct t10_alua_lba_map *lba_map = NULL; 2497 struct list_head lba_list; 2498 char *map_entries, *orig, *ptr; 2499 char state; 2500 int pg_num = -1, pg; 2501 int ret = 0, num = 0, pg_id, alua_state; 2502 unsigned long start_lba = -1, end_lba = -1; 2503 unsigned long segment_size = -1, segment_mult = -1; 2504 2505 orig = map_entries = kstrdup(page, GFP_KERNEL); 2506 if (!map_entries) 2507 return -ENOMEM; 2508 2509 INIT_LIST_HEAD(&lba_list); 2510 while ((ptr = strsep(&map_entries, "\n")) != NULL) { 2511 if (!*ptr) 2512 continue; 2513 2514 if (num == 0) { 2515 if 
(sscanf(ptr, "%lu %lu\n", 2516 &segment_size, &segment_mult) != 2) { 2517 pr_err("Invalid line %d\n", num); 2518 ret = -EINVAL; 2519 break; 2520 } 2521 num++; 2522 continue; 2523 } 2524 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { 2525 pr_err("Invalid line %d\n", num); 2526 ret = -EINVAL; 2527 break; 2528 } 2529 ptr = strchr(ptr, ' '); 2530 if (!ptr) { 2531 pr_err("Invalid line %d, missing end lba\n", num); 2532 ret = -EINVAL; 2533 break; 2534 } 2535 ptr++; 2536 ptr = strchr(ptr, ' '); 2537 if (!ptr) { 2538 pr_err("Invalid line %d, missing state definitions\n", 2539 num); 2540 ret = -EINVAL; 2541 break; 2542 } 2543 ptr++; 2544 lba_map = core_alua_allocate_lba_map(&lba_list, 2545 start_lba, end_lba); 2546 if (IS_ERR(lba_map)) { 2547 ret = PTR_ERR(lba_map); 2548 break; 2549 } 2550 pg = 0; 2551 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { 2552 switch (state) { 2553 case 'O': 2554 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; 2555 break; 2556 case 'A': 2557 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; 2558 break; 2559 case 'S': 2560 alua_state = ALUA_ACCESS_STATE_STANDBY; 2561 break; 2562 case 'U': 2563 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; 2564 break; 2565 default: 2566 pr_err("Invalid ALUA state '%c'\n", state); 2567 ret = -EINVAL; 2568 goto out; 2569 } 2570 2571 ret = core_alua_allocate_lba_map_mem(lba_map, 2572 pg_id, alua_state); 2573 if (ret) { 2574 pr_err("Invalid target descriptor %d:%c " 2575 "at line %d\n", 2576 pg_id, state, num); 2577 break; 2578 } 2579 pg++; 2580 ptr = strchr(ptr, ' '); 2581 if (ptr) 2582 ptr++; 2583 else 2584 break; 2585 } 2586 if (pg_num == -1) 2587 pg_num = pg; 2588 else if (pg != pg_num) { 2589 pr_err("Only %d from %d port groups definitions " 2590 "at line %d\n", pg, pg_num, num); 2591 ret = -EINVAL; 2592 break; 2593 } 2594 num++; 2595 } 2596 out: 2597 if (ret) { 2598 core_alua_free_lba_map(&lba_list); 2599 count = ret; 2600 } else 2601 core_alua_set_lba_map(dev, &lba_list, 2602 segment_size, segment_mult); 2603 kfree(orig); 2604 return count; 2605 } 2606 2607 CONFIGFS_ATTR_RO(target_dev_, info); 2608 CONFIGFS_ATTR_WO(target_dev_, control); 2609 CONFIGFS_ATTR(target_dev_, alias); 2610 CONFIGFS_ATTR(target_dev_, udev_path); 2611 CONFIGFS_ATTR(target_dev_, enable); 2612 CONFIGFS_ATTR(target_dev_, alua_lu_gp); 2613 CONFIGFS_ATTR(target_dev_, lba_map); 2614 2615 static struct configfs_attribute *target_core_dev_attrs[] = { 2616 &target_dev_attr_info, 2617 &target_dev_attr_control, 2618 &target_dev_attr_alias, 2619 &target_dev_attr_udev_path, 2620 &target_dev_attr_enable, 2621 &target_dev_attr_alua_lu_gp, 2622 &target_dev_attr_lba_map, 2623 NULL, 2624 }; 2625 2626 static void target_core_dev_release(struct config_item *item) 2627 { 2628 struct config_group *dev_cg = to_config_group(item); 2629 struct se_device *dev = 2630 container_of(dev_cg, struct se_device, dev_group); 2631 2632 target_free_device(dev); 2633 } 2634 2635 /* 2636 * Used in target_core_fabric_configfs.c to verify valid se_device symlink 2637 * within target_fabric_port_link() 2638 */ 2639 struct configfs_item_operations target_core_dev_item_ops = { 2640 .release = target_core_dev_release, 2641 }; 2642 2643 TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); 2644 2645 /* End functions for struct config_item_type tb_dev_cit */ 2646 2647 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ 2648 2649 static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item) 2650 { 2651 return 
container_of(to_config_group(item), struct t10_alua_lu_gp, 2652 lu_gp_group); 2653 } 2654 2655 static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page) 2656 { 2657 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); 2658 2659 if (!lu_gp->lu_gp_valid_id) 2660 return 0; 2661 return sprintf(page, "%hu\n", lu_gp->lu_gp_id); 2662 } 2663 2664 static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item, 2665 const char *page, size_t count) 2666 { 2667 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); 2668 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; 2669 unsigned long lu_gp_id; 2670 int ret; 2671 2672 ret = kstrtoul(page, 0, &lu_gp_id); 2673 if (ret < 0) { 2674 pr_err("kstrtoul() returned %d for" 2675 " lu_gp_id\n", ret); 2676 return ret; 2677 } 2678 if (lu_gp_id > 0x0000ffff) { 2679 pr_err("ALUA lu_gp_id: %lu exceeds maximum:" 2680 " 0x0000ffff\n", lu_gp_id); 2681 return -EINVAL; 2682 } 2683 2684 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); 2685 if (ret < 0) 2686 return -EINVAL; 2687 2688 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" 2689 " Group: core/alua/lu_gps/%s to ID: %hu\n", 2690 config_item_name(&alua_lu_gp_cg->cg_item), 2691 lu_gp->lu_gp_id); 2692 2693 return count; 2694 } 2695 2696 static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) 2697 { 2698 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); 2699 struct se_device *dev; 2700 struct se_hba *hba; 2701 struct t10_alua_lu_gp_member *lu_gp_mem; 2702 ssize_t len = 0, cur_len; 2703 unsigned char buf[LU_GROUP_NAME_BUF] = { }; 2704 2705 spin_lock(&lu_gp->lu_gp_lock); 2706 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 2707 dev = lu_gp_mem->lu_gp_mem_dev; 2708 hba = dev->se_hba; 2709 2710 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", 2711 config_item_name(&hba->hba_group.cg_item), 2712 config_item_name(&dev->dev_group.cg_item)); 2713 cur_len++; /* Extra byte for NULL terminator */ 2714 2715 if ((cur_len + len) > PAGE_SIZE) { 2716 pr_warn("Ran out of lu_gp_show_attr" 2717 "_members buffer\n"); 2718 break; 2719 } 2720 memcpy(page+len, buf, cur_len); 2721 len += cur_len; 2722 } 2723 spin_unlock(&lu_gp->lu_gp_lock); 2724 2725 return len; 2726 } 2727 2728 CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); 2729 CONFIGFS_ATTR_RO(target_lu_gp_, members); 2730 2731 static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { 2732 &target_lu_gp_attr_lu_gp_id, 2733 &target_lu_gp_attr_members, 2734 NULL, 2735 }; 2736 2737 static void target_core_alua_lu_gp_release(struct config_item *item) 2738 { 2739 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2740 struct t10_alua_lu_gp, lu_gp_group); 2741 2742 core_alua_free_lu_gp(lu_gp); 2743 } 2744 2745 static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2746 .release = target_core_alua_lu_gp_release, 2747 }; 2748 2749 static const struct config_item_type target_core_alua_lu_gp_cit = { 2750 .ct_item_ops = &target_core_alua_lu_gp_ops, 2751 .ct_attrs = target_core_alua_lu_gp_attrs, 2752 .ct_owner = THIS_MODULE, 2753 }; 2754 2755 /* End functions for struct config_item_type target_core_alua_lu_gp_cit */ 2756 2757 /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ 2758 2759 static struct config_group *target_core_alua_create_lu_gp( 2760 struct config_group *group, 2761 const char *name) 2762 { 2763 struct t10_alua_lu_gp *lu_gp; 2764 struct config_group *alua_lu_gp_cg = NULL; 2765 struct config_item *alua_lu_gp_ci = NULL; 2766 2767 lu_gp = 
core_alua_allocate_lu_gp(name, 0); 2768 if (IS_ERR(lu_gp)) 2769 return NULL; 2770 2771 alua_lu_gp_cg = &lu_gp->lu_gp_group; 2772 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; 2773 2774 config_group_init_type_name(alua_lu_gp_cg, name, 2775 &target_core_alua_lu_gp_cit); 2776 2777 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" 2778 " Group: core/alua/lu_gps/%s\n", 2779 config_item_name(alua_lu_gp_ci)); 2780 2781 return alua_lu_gp_cg; 2782 2783 } 2784 2785 static void target_core_alua_drop_lu_gp( 2786 struct config_group *group, 2787 struct config_item *item) 2788 { 2789 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2790 struct t10_alua_lu_gp, lu_gp_group); 2791 2792 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2793 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2794 config_item_name(item), lu_gp->lu_gp_id); 2795 /* 2796 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() 2797 * -> target_core_alua_lu_gp_release() 2798 */ 2799 config_item_put(item); 2800 } 2801 2802 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2803 .make_group = &target_core_alua_create_lu_gp, 2804 .drop_item = &target_core_alua_drop_lu_gp, 2805 }; 2806 2807 static const struct config_item_type target_core_alua_lu_gps_cit = { 2808 .ct_item_ops = NULL, 2809 .ct_group_ops = &target_core_alua_lu_gps_group_ops, 2810 .ct_owner = THIS_MODULE, 2811 }; 2812 2813 /* End functions for struct config_item_type target_core_alua_lu_gps_cit */ 2814 2815 /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 2816 2817 static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item) 2818 { 2819 return container_of(to_config_group(item), struct t10_alua_tg_pt_gp, 2820 tg_pt_gp_group); 2821 } 2822 2823 static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, 2824 char *page) 2825 { 2826 return sprintf(page, "%d\n", 2827 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); 2828 } 2829 2830 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, 2831 const char *page, size_t count) 2832 { 2833 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2834 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 2835 unsigned long tmp; 2836 int new_state, ret; 2837 2838 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2839 pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n"); 2840 return -EINVAL; 2841 } 2842 if (!target_dev_configured(dev)) { 2843 pr_err("Unable to set alua_access_state while device is" 2844 " not configured\n"); 2845 return -ENODEV; 2846 } 2847 2848 ret = kstrtoul(page, 0, &tmp); 2849 if (ret < 0) { 2850 pr_err("Unable to extract new ALUA access state from" 2851 " %s\n", page); 2852 return ret; 2853 } 2854 new_state = (int)tmp; 2855 2856 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { 2857 pr_err("Unable to process implicit configfs ALUA" 2858 " transition while TPGS_IMPLICIT_ALUA is disabled\n"); 2859 return -EINVAL; 2860 } 2861 if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && 2862 new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { 2863 /* LBA DEPENDENT is only allowed with implicit ALUA */ 2864 pr_err("Unable to process implicit configfs ALUA transition" 2865 " while explicit ALUA management is enabled\n"); 2866 return -EINVAL; 2867 } 2868 2869 ret = core_alua_do_port_transition(tg_pt_gp, dev, 2870 NULL, NULL, new_state, 0); 2871 return (!ret) ? 
count : -EINVAL; 2872 } 2873 2874 static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item, 2875 char *page) 2876 { 2877 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2878 return sprintf(page, "%s\n", 2879 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); 2880 } 2881 2882 static ssize_t target_tg_pt_gp_alua_access_status_store( 2883 struct config_item *item, const char *page, size_t count) 2884 { 2885 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2886 unsigned long tmp; 2887 int new_status, ret; 2888 2889 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2890 pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n"); 2891 return -EINVAL; 2892 } 2893 2894 ret = kstrtoul(page, 0, &tmp); 2895 if (ret < 0) { 2896 pr_err("Unable to extract new ALUA access status" 2897 " from %s\n", page); 2898 return ret; 2899 } 2900 new_status = (int)tmp; 2901 2902 if ((new_status != ALUA_STATUS_NONE) && 2903 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 2904 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { 2905 pr_err("Illegal ALUA access status: 0x%02x\n", 2906 new_status); 2907 return -EINVAL; 2908 } 2909 2910 tg_pt_gp->tg_pt_gp_alua_access_status = new_status; 2911 return count; 2912 } 2913 2914 static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item, 2915 char *page) 2916 { 2917 return core_alua_show_access_type(to_tg_pt_gp(item), page); 2918 } 2919 2920 static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item, 2921 const char *page, size_t count) 2922 { 2923 return core_alua_store_access_type(to_tg_pt_gp(item), page, count); 2924 } 2925 2926 #define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \ 2927 static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \ 2928 struct config_item *item, char *p) \ 2929 { \ 2930 struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ 2931 return sprintf(p, "%d\n", \ 2932 !!(t->tg_pt_gp_alua_supported_states & _bit)); \ 2933 } \ 2934 \ 2935 static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \ 2936 struct config_item *item, const char *p, size_t c) \ 2937 { \ 2938 struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ 2939 unsigned long tmp; \ 2940 int ret; \ 2941 \ 2942 if (!t->tg_pt_gp_valid_id) { \ 2943 pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \ 2944 return -EINVAL; \ 2945 } \ 2946 \ 2947 ret = kstrtoul(p, 0, &tmp); \ 2948 if (ret < 0) { \ 2949 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ 2950 return -EINVAL; \ 2951 } \ 2952 if (tmp > 1) { \ 2953 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ 2954 return -EINVAL; \ 2955 } \ 2956 if (tmp) \ 2957 t->tg_pt_gp_alua_supported_states |= _bit; \ 2958 else \ 2959 t->tg_pt_gp_alua_supported_states &= ~_bit; \ 2960 \ 2961 return c; \ 2962 } 2963 2964 ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP); 2965 ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP); 2966 ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP); 2967 ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP); 2968 ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP); 2969 ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP); 2970 ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP); 2971 2972 static ssize_t target_tg_pt_gp_alua_write_metadata_show( 2973 struct config_item *item, char *page) 2974 { 2975 return sprintf(page, "%d\n", 2976 to_tg_pt_gp(item)->tg_pt_gp_write_metadata); 2977 } 2978 2979 static ssize_t target_tg_pt_gp_alua_write_metadata_store( 2980 struct config_item *item, const char 
*page, size_t count) 2981 { 2982 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2983 unsigned long tmp; 2984 int ret; 2985 2986 ret = kstrtoul(page, 0, &tmp); 2987 if (ret < 0) { 2988 pr_err("Unable to extract alua_write_metadata\n"); 2989 return ret; 2990 } 2991 2992 if ((tmp != 0) && (tmp != 1)) { 2993 pr_err("Illegal value for alua_write_metadata:" 2994 " %lu\n", tmp); 2995 return -EINVAL; 2996 } 2997 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; 2998 2999 return count; 3000 } 3001 3002 static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item, 3003 char *page) 3004 { 3005 return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page); 3006 } 3007 3008 static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item, 3009 const char *page, size_t count) 3010 { 3011 return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page, 3012 count); 3013 } 3014 3015 static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item, 3016 char *page) 3017 { 3018 return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page); 3019 } 3020 3021 static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item, 3022 const char *page, size_t count) 3023 { 3024 return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page, 3025 count); 3026 } 3027 3028 static ssize_t target_tg_pt_gp_implicit_trans_secs_show( 3029 struct config_item *item, char *page) 3030 { 3031 return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page); 3032 } 3033 3034 static ssize_t target_tg_pt_gp_implicit_trans_secs_store( 3035 struct config_item *item, const char *page, size_t count) 3036 { 3037 return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page, 3038 count); 3039 } 3040 3041 static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item, 3042 char *page) 3043 { 3044 return core_alua_show_preferred_bit(to_tg_pt_gp(item), page); 3045 } 3046 3047 static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item, 3048 const char *page, size_t count) 3049 { 3050 return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count); 3051 } 3052 3053 static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item, 3054 char *page) 3055 { 3056 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 3057 3058 if (!tg_pt_gp->tg_pt_gp_valid_id) 3059 return 0; 3060 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); 3061 } 3062 3063 static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item, 3064 const char *page, size_t count) 3065 { 3066 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 3067 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; 3068 unsigned long tg_pt_gp_id; 3069 int ret; 3070 3071 ret = kstrtoul(page, 0, &tg_pt_gp_id); 3072 if (ret < 0) { 3073 pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n", 3074 page); 3075 return ret; 3076 } 3077 if (tg_pt_gp_id > 0x0000ffff) { 3078 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n", 3079 tg_pt_gp_id); 3080 return -EINVAL; 3081 } 3082 3083 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); 3084 if (ret < 0) 3085 return -EINVAL; 3086 3087 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " 3088 "core/alua/tg_pt_gps/%s to ID: %hu\n", 3089 config_item_name(&alua_tg_pt_gp_cg->cg_item), 3090 tg_pt_gp->tg_pt_gp_id); 3091 3092 return count; 3093 } 3094 3095 static ssize_t target_tg_pt_gp_members_show(struct config_item *item, 3096 char *page) 3097 { 3098 struct t10_alua_tg_pt_gp 
*tg_pt_gp = to_tg_pt_gp(item); 3099 struct se_lun *lun; 3100 ssize_t len = 0, cur_len; 3101 unsigned char buf[TG_PT_GROUP_NAME_BUF] = { }; 3102 3103 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 3104 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 3105 lun_tg_pt_gp_link) { 3106 struct se_portal_group *tpg = lun->lun_tpg; 3107 3108 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" 3109 "/%s\n", tpg->se_tpg_tfo->fabric_name, 3110 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 3111 tpg->se_tpg_tfo->tpg_get_tag(tpg), 3112 config_item_name(&lun->lun_group.cg_item)); 3113 cur_len++; /* Extra byte for NULL terminator */ 3114 3115 if ((cur_len + len) > PAGE_SIZE) { 3116 pr_warn("Ran out of lu_gp_show_attr" 3117 "_members buffer\n"); 3118 break; 3119 } 3120 memcpy(page+len, buf, cur_len); 3121 len += cur_len; 3122 } 3123 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 3124 3125 return len; 3126 } 3127 3128 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state); 3129 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status); 3130 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type); 3131 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning); 3132 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline); 3133 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent); 3134 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable); 3135 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby); 3136 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized); 3137 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized); 3138 CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata); 3139 CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs); 3140 CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs); 3141 CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs); 3142 CONFIGFS_ATTR(target_tg_pt_gp_, preferred); 3143 CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id); 3144 CONFIGFS_ATTR_RO(target_tg_pt_gp_, members); 3145 3146 static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { 3147 &target_tg_pt_gp_attr_alua_access_state, 3148 &target_tg_pt_gp_attr_alua_access_status, 3149 &target_tg_pt_gp_attr_alua_access_type, 3150 &target_tg_pt_gp_attr_alua_support_transitioning, 3151 &target_tg_pt_gp_attr_alua_support_offline, 3152 &target_tg_pt_gp_attr_alua_support_lba_dependent, 3153 &target_tg_pt_gp_attr_alua_support_unavailable, 3154 &target_tg_pt_gp_attr_alua_support_standby, 3155 &target_tg_pt_gp_attr_alua_support_active_nonoptimized, 3156 &target_tg_pt_gp_attr_alua_support_active_optimized, 3157 &target_tg_pt_gp_attr_alua_write_metadata, 3158 &target_tg_pt_gp_attr_nonop_delay_msecs, 3159 &target_tg_pt_gp_attr_trans_delay_msecs, 3160 &target_tg_pt_gp_attr_implicit_trans_secs, 3161 &target_tg_pt_gp_attr_preferred, 3162 &target_tg_pt_gp_attr_tg_pt_gp_id, 3163 &target_tg_pt_gp_attr_members, 3164 NULL, 3165 }; 3166 3167 static void target_core_alua_tg_pt_gp_release(struct config_item *item) 3168 { 3169 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 3170 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 3171 3172 core_alua_free_tg_pt_gp(tg_pt_gp); 3173 } 3174 3175 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 3176 .release = target_core_alua_tg_pt_gp_release, 3177 }; 3178 3179 static const struct config_item_type target_core_alua_tg_pt_gp_cit = { 3180 .ct_item_ops = &target_core_alua_tg_pt_gp_ops, 3181 .ct_attrs = target_core_alua_tg_pt_gp_attrs, 3182 .ct_owner = THIS_MODULE, 3183 }; 3184 3185 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 3186 3187 
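/*
 * Illustrative usage sketch (example names and paths, not taken from the
 * original source): target port groups are created by mkdir(2) under the
 * per-device alua/ group via the group operations below, and several of
 * the attributes above (e.g. alua_access_state) reject writes until a
 * tg_pt_gp_id has been assigned:
 *
 *   mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/example_tg_pt_gp
 *   echo 16 > /sys/kernel/config/target/core/$HBA/$DEV/alua/example_tg_pt_gp/tg_pt_gp_id
 */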
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */ 3188 3189 static struct config_group *target_core_alua_create_tg_pt_gp( 3190 struct config_group *group, 3191 const char *name) 3192 { 3193 struct t10_alua *alua = container_of(group, struct t10_alua, 3194 alua_tg_pt_gps_group); 3195 struct t10_alua_tg_pt_gp *tg_pt_gp; 3196 struct config_group *alua_tg_pt_gp_cg = NULL; 3197 struct config_item *alua_tg_pt_gp_ci = NULL; 3198 3199 tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0); 3200 if (!tg_pt_gp) 3201 return NULL; 3202 3203 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; 3204 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item; 3205 3206 config_group_init_type_name(alua_tg_pt_gp_cg, name, 3207 &target_core_alua_tg_pt_gp_cit); 3208 3209 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" 3210 " Group: alua/tg_pt_gps/%s\n", 3211 config_item_name(alua_tg_pt_gp_ci)); 3212 3213 return alua_tg_pt_gp_cg; 3214 } 3215 3216 static void target_core_alua_drop_tg_pt_gp( 3217 struct config_group *group, 3218 struct config_item *item) 3219 { 3220 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 3221 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 3222 3223 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" 3224 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 3225 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 3226 /* 3227 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() 3228 * -> target_core_alua_tg_pt_gp_release(). 3229 */ 3230 config_item_put(item); 3231 } 3232 3233 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { 3234 .make_group = &target_core_alua_create_tg_pt_gp, 3235 .drop_item = &target_core_alua_drop_tg_pt_gp, 3236 }; 3237 3238 TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL); 3239 3240 /* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */ 3241 3242 /* Start functions for struct config_item_type target_core_alua_cit */ 3243 3244 /* 3245 * target_core_alua_cit is a ConfigFS group that lives under 3246 * /sys/kernel/config/target/core/alua. There are default groups 3247 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to 3248 * target_core_alua_cit in target_core_init_configfs() below. 
3249 */ 3250 static const struct config_item_type target_core_alua_cit = { 3251 .ct_item_ops = NULL, 3252 .ct_attrs = NULL, 3253 .ct_owner = THIS_MODULE, 3254 }; 3255 3256 /* End functions for struct config_item_type target_core_alua_cit */ 3257 3258 /* Start functions for struct config_item_type tb_dev_stat_cit */ 3259 3260 static struct config_group *target_core_stat_mkdir( 3261 struct config_group *group, 3262 const char *name) 3263 { 3264 return ERR_PTR(-ENOSYS); 3265 } 3266 3267 static void target_core_stat_rmdir( 3268 struct config_group *group, 3269 struct config_item *item) 3270 { 3271 return; 3272 } 3273 3274 static struct configfs_group_operations target_core_stat_group_ops = { 3275 .make_group = &target_core_stat_mkdir, 3276 .drop_item = &target_core_stat_rmdir, 3277 }; 3278 3279 TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL); 3280 3281 /* End functions for struct config_item_type tb_dev_stat_cit */ 3282 3283 /* Start functions for struct config_item_type target_core_hba_cit */ 3284 3285 static struct config_group *target_core_make_subdev( 3286 struct config_group *group, 3287 const char *name) 3288 { 3289 struct t10_alua_tg_pt_gp *tg_pt_gp; 3290 struct config_item *hba_ci = &group->cg_item; 3291 struct se_hba *hba = item_to_hba(hba_ci); 3292 struct target_backend *tb = hba->backend; 3293 struct se_device *dev; 3294 int errno = -ENOMEM, ret; 3295 3296 ret = mutex_lock_interruptible(&hba->hba_access_mutex); 3297 if (ret) 3298 return ERR_PTR(ret); 3299 3300 dev = target_alloc_device(hba, name); 3301 if (!dev) 3302 goto out_unlock; 3303 3304 config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit); 3305 3306 config_group_init_type_name(&dev->dev_action_group, "action", 3307 &tb->tb_dev_action_cit); 3308 configfs_add_default_group(&dev->dev_action_group, &dev->dev_group); 3309 3310 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", 3311 &tb->tb_dev_attrib_cit); 3312 configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group); 3313 3314 config_group_init_type_name(&dev->dev_pr_group, "pr", 3315 &tb->tb_dev_pr_cit); 3316 configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group); 3317 3318 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", 3319 &tb->tb_dev_wwn_cit); 3320 configfs_add_default_group(&dev->t10_wwn.t10_wwn_group, 3321 &dev->dev_group); 3322 3323 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, 3324 "alua", &tb->tb_dev_alua_tg_pt_gps_cit); 3325 configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group, 3326 &dev->dev_group); 3327 3328 config_group_init_type_name(&dev->dev_stat_grps.stat_group, 3329 "statistics", &tb->tb_dev_stat_cit); 3330 configfs_add_default_group(&dev->dev_stat_grps.stat_group, 3331 &dev->dev_group); 3332 3333 /* 3334 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 3335 */ 3336 tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1); 3337 if (!tg_pt_gp) 3338 goto out_free_device; 3339 dev->t10_alua.default_tg_pt_gp = tg_pt_gp; 3340 3341 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, 3342 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); 3343 configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group, 3344 &dev->t10_alua.alua_tg_pt_gps_group); 3345 3346 /* 3347 * Add core/$HBA/$DEV/statistics/ default groups 3348 */ 3349 target_stat_setup_dev_default_groups(dev); 3350 3351 mutex_lock(&target_devices_lock); 3352 target_devices++; 3353 mutex_unlock(&target_devices_lock); 3354 3355 mutex_unlock(&hba->hba_access_mutex); 3356 return &dev->dev_group; 3357 
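	/*
	 * Error unwinding for the allocation failures above: release the
	 * partially constructed se_device and drop hba_access_mutex before
	 * handing -ENOMEM back to configfs.
	 */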
3358 out_free_device: 3359 target_free_device(dev); 3360 out_unlock: 3361 mutex_unlock(&hba->hba_access_mutex); 3362 return ERR_PTR(errno); 3363 } 3364 3365 static void target_core_drop_subdev( 3366 struct config_group *group, 3367 struct config_item *item) 3368 { 3369 struct config_group *dev_cg = to_config_group(item); 3370 struct se_device *dev = 3371 container_of(dev_cg, struct se_device, dev_group); 3372 struct se_hba *hba; 3373 3374 hba = item_to_hba(&dev->se_hba->hba_group.cg_item); 3375 3376 mutex_lock(&hba->hba_access_mutex); 3377 3378 configfs_remove_default_groups(&dev->dev_stat_grps.stat_group); 3379 configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group); 3380 3381 /* 3382 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp 3383 * directly from target_core_alua_tg_pt_gp_release(). 3384 */ 3385 dev->t10_alua.default_tg_pt_gp = NULL; 3386 3387 configfs_remove_default_groups(dev_cg); 3388 3389 /* 3390 * se_dev is released from target_core_dev_item_ops->release() 3391 */ 3392 config_item_put(item); 3393 3394 mutex_lock(&target_devices_lock); 3395 target_devices--; 3396 mutex_unlock(&target_devices_lock); 3397 3398 mutex_unlock(&hba->hba_access_mutex); 3399 } 3400 3401 static struct configfs_group_operations target_core_hba_group_ops = { 3402 .make_group = target_core_make_subdev, 3403 .drop_item = target_core_drop_subdev, 3404 }; 3405 3406 3407 static inline struct se_hba *to_hba(struct config_item *item) 3408 { 3409 return container_of(to_config_group(item), struct se_hba, hba_group); 3410 } 3411 3412 static ssize_t target_hba_info_show(struct config_item *item, char *page) 3413 { 3414 struct se_hba *hba = to_hba(item); 3415 3416 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", 3417 hba->hba_id, hba->backend->ops->name, 3418 TARGET_CORE_VERSION); 3419 } 3420 3421 static ssize_t target_hba_mode_show(struct config_item *item, char *page) 3422 { 3423 struct se_hba *hba = to_hba(item); 3424 int hba_mode = 0; 3425 3426 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) 3427 hba_mode = 1; 3428 3429 return sprintf(page, "%d\n", hba_mode); 3430 } 3431 3432 static ssize_t target_hba_mode_store(struct config_item *item, 3433 const char *page, size_t count) 3434 { 3435 struct se_hba *hba = to_hba(item); 3436 unsigned long mode_flag; 3437 int ret; 3438 3439 if (hba->backend->ops->pmode_enable_hba == NULL) 3440 return -EINVAL; 3441 3442 ret = kstrtoul(page, 0, &mode_flag); 3443 if (ret < 0) { 3444 pr_err("Unable to extract hba mode flag: %d\n", ret); 3445 return ret; 3446 } 3447 3448 if (hba->dev_count) { 3449 pr_err("Unable to set hba_mode with active devices\n"); 3450 return -EINVAL; 3451 } 3452 3453 ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag); 3454 if (ret < 0) 3455 return -EINVAL; 3456 if (ret > 0) 3457 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; 3458 else if (ret == 0) 3459 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 3460 3461 return count; 3462 } 3463 3464 CONFIGFS_ATTR_RO(target_, hba_info); 3465 CONFIGFS_ATTR(target_, hba_mode); 3466 3467 static void target_core_hba_release(struct config_item *item) 3468 { 3469 struct se_hba *hba = container_of(to_config_group(item), 3470 struct se_hba, hba_group); 3471 core_delete_hba(hba); 3472 } 3473 3474 static struct configfs_attribute *target_core_hba_attrs[] = { 3475 &target_attr_hba_info, 3476 &target_attr_hba_mode, 3477 NULL, 3478 }; 3479 3480 static struct configfs_item_operations target_core_hba_item_ops = { 3481 .release = target_core_hba_release, 3482 }; 3483 3484 static const struct config_item_type 
target_core_hba_cit = { 3485 .ct_item_ops = &target_core_hba_item_ops, 3486 .ct_group_ops = &target_core_hba_group_ops, 3487 .ct_attrs = target_core_hba_attrs, 3488 .ct_owner = THIS_MODULE, 3489 }; 3490 3491 static struct config_group *target_core_call_addhbatotarget( 3492 struct config_group *group, 3493 const char *name) 3494 { 3495 char *se_plugin_str, *str, *str2; 3496 struct se_hba *hba; 3497 char buf[TARGET_CORE_NAME_MAX_LEN] = { }; 3498 unsigned long plugin_dep_id = 0; 3499 int ret; 3500 3501 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { 3502 pr_err("Passed *name strlen(): %d exceeds" 3503 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3504 TARGET_CORE_NAME_MAX_LEN); 3505 return ERR_PTR(-ENAMETOOLONG); 3506 } 3507 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); 3508 3509 str = strstr(buf, "_"); 3510 if (!str) { 3511 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); 3512 return ERR_PTR(-EINVAL); 3513 } 3514 se_plugin_str = buf; 3515 /* 3516 * Special case for subsystem plugins that have "_" in their names. 3517 * Namely rd_direct and rd_mcp.. 3518 */ 3519 str2 = strstr(str+1, "_"); 3520 if (str2) { 3521 *str2 = '\0'; /* Terminate for *se_plugin_str */ 3522 str2++; /* Skip to start of plugin dependent ID */ 3523 str = str2; 3524 } else { 3525 *str = '\0'; /* Terminate for *se_plugin_str */ 3526 str++; /* Skip to start of plugin dependent ID */ 3527 } 3528 3529 ret = kstrtoul(str, 0, &plugin_dep_id); 3530 if (ret < 0) { 3531 pr_err("kstrtoul() returned %d for" 3532 " plugin_dep_id\n", ret); 3533 return ERR_PTR(ret); 3534 } 3535 /* 3536 * Load up TCM subsystem plugins if they have not already been loaded. 3537 */ 3538 transport_subsystem_check_init(); 3539 3540 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); 3541 if (IS_ERR(hba)) 3542 return ERR_CAST(hba); 3543 3544 config_group_init_type_name(&hba->hba_group, name, 3545 &target_core_hba_cit); 3546 3547 return &hba->hba_group; 3548 } 3549 3550 static void target_core_call_delhbafromtarget( 3551 struct config_group *group, 3552 struct config_item *item) 3553 { 3554 /* 3555 * core_delete_hba() is called from target_core_hba_item_ops->release() 3556 * -> target_core_hba_release() 3557 */ 3558 config_item_put(item); 3559 } 3560 3561 static struct configfs_group_operations target_core_group_ops = { 3562 .make_group = target_core_call_addhbatotarget, 3563 .drop_item = target_core_call_delhbafromtarget, 3564 }; 3565 3566 static const struct config_item_type target_core_cit = { 3567 .ct_item_ops = NULL, 3568 .ct_group_ops = &target_core_group_ops, 3569 .ct_attrs = NULL, 3570 .ct_owner = THIS_MODULE, 3571 }; 3572 3573 /* Stop functions for struct config_item_type target_core_hba_cit */ 3574 3575 void target_setup_backend_cits(struct target_backend *tb) 3576 { 3577 target_core_setup_dev_cit(tb); 3578 target_core_setup_dev_action_cit(tb); 3579 target_core_setup_dev_attrib_cit(tb); 3580 target_core_setup_dev_pr_cit(tb); 3581 target_core_setup_dev_wwn_cit(tb); 3582 target_core_setup_dev_alua_tg_pt_gps_cit(tb); 3583 target_core_setup_dev_stat_cit(tb); 3584 } 3585 3586 static void target_init_dbroot(void) 3587 { 3588 struct file *fp; 3589 3590 snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED); 3591 fp = filp_open(db_root_stage, O_RDONLY, 0); 3592 if (IS_ERR(fp)) { 3593 pr_err("db_root: cannot open: %s\n", db_root_stage); 3594 return; 3595 } 3596 if (!S_ISDIR(file_inode(fp)->i_mode)) { 3597 filp_close(fp, NULL); 3598 pr_err("db_root: not a valid directory: %s\n", db_root_stage); 3599 return; 3600 } 3601 
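	/*
	 * The preferred db_root exists and is a directory: close the probe
	 * handle and make it the active db_root.
	 */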
filp_close(fp, NULL); 3602 3603 strncpy(db_root, db_root_stage, DB_ROOT_LEN); 3604 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root); 3605 } 3606 3607 static int __init target_core_init_configfs(void) 3608 { 3609 struct configfs_subsystem *subsys = &target_core_fabrics; 3610 struct t10_alua_lu_gp *lu_gp; 3611 int ret; 3612 3613 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" 3614 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 3615 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 3616 3617 config_group_init(&subsys->su_group); 3618 mutex_init(&subsys->su_mutex); 3619 3620 ret = init_se_kmem_caches(); 3621 if (ret < 0) 3622 return ret; 3623 /* 3624 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object 3625 * and ALUA Logical Unit Group and Target Port Group infrastructure. 3626 */ 3627 config_group_init_type_name(&target_core_hbagroup, "core", 3628 &target_core_cit); 3629 configfs_add_default_group(&target_core_hbagroup, &subsys->su_group); 3630 3631 /* 3632 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ 3633 */ 3634 config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit); 3635 configfs_add_default_group(&alua_group, &target_core_hbagroup); 3636 3637 /* 3638 * Add ALUA Logical Unit Group and Target Port Group ConfigFS 3639 * groups under /sys/kernel/config/target/core/alua/ 3640 */ 3641 config_group_init_type_name(&alua_lu_gps_group, "lu_gps", 3642 &target_core_alua_lu_gps_cit); 3643 configfs_add_default_group(&alua_lu_gps_group, &alua_group); 3644 3645 /* 3646 * Add core/alua/lu_gps/default_lu_gp 3647 */ 3648 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); 3649 if (IS_ERR(lu_gp)) { 3650 ret = -ENOMEM; 3651 goto out_global; 3652 } 3653 3654 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", 3655 &target_core_alua_lu_gp_cit); 3656 configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group); 3657 3658 default_lu_gp = lu_gp; 3659 3660 /* 3661 * Register the target_core_mod subsystem with configfs. 3662 */ 3663 ret = configfs_register_subsystem(subsys); 3664 if (ret < 0) { 3665 pr_err("Error %d while registering subsystem %s\n", 3666 ret, subsys->su_group.cg_item.ci_namebuf); 3667 goto out_global; 3668 } 3669 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" 3670 " Infrastructure: "TARGET_CORE_VERSION" on %s/%s" 3671 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); 3672 /* 3673 * Register built-in RAMDISK subsystem logic for virtual LUN 0 3674 */ 3675 ret = rd_module_init(); 3676 if (ret < 0) 3677 goto out; 3678 3679 ret = core_dev_setup_virtual_lun0(); 3680 if (ret < 0) 3681 goto out; 3682 3683 ret = target_xcopy_setup_pt(); 3684 if (ret < 0) 3685 goto out; 3686 3687 target_init_dbroot(); 3688 3689 return 0; 3690 3691 out: 3692 configfs_unregister_subsystem(subsys); 3693 core_dev_release_virtual_lun0(); 3694 rd_module_exit(); 3695 out_global: 3696 if (default_lu_gp) { 3697 core_alua_free_lu_gp(default_lu_gp); 3698 default_lu_gp = NULL; 3699 } 3700 release_se_kmem_caches(); 3701 return ret; 3702 } 3703 3704 static void __exit target_core_exit_configfs(void) 3705 { 3706 configfs_remove_default_groups(&alua_lu_gps_group); 3707 configfs_remove_default_groups(&alua_group); 3708 configfs_remove_default_groups(&target_core_hbagroup); 3709 3710 /* 3711 * We expect subsys->su_group.default_groups to be released 3712 * by configfs subsystem provider logic.. 
3713 */
3714 configfs_unregister_subsystem(&target_core_fabrics);
3715
3716 core_alua_free_lu_gp(default_lu_gp);
3717 default_lu_gp = NULL;
3718
3719 pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3720 " Infrastructure\n");
3721
3722 core_dev_release_virtual_lun0();
3723 rd_module_exit();
3724 target_xcopy_release_pt();
3725 release_se_kmem_caches();
3726 }
3727
3728 MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3729 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3730 MODULE_LICENSE("GPL");
3731
3732 module_init(target_core_init_configfs);
3733 module_exit(target_core_exit_configfs);
3734