// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"

#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
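/*
 * Illustration only (not part of the original code): given the invocation
 * TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs) used later
 * in this file, the macro above expands to roughly the following helper,
 * which fills in the config_item_type embedded in struct target_backend:
 *
 *	static void target_core_setup_dev_wwn_cit(struct target_backend *tb)
 *	{
 *		struct config_item_type *cit = &tb->tb_dev_wwn_cit;
 *
 *		cit->ct_item_ops = NULL;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = target_core_dev_wwn_attrs;
 *		cit->ct_owner = tb->ops->owner;
 *		pr_debug("Setup generic %s\n", __stringify(dev_wwn));
 *	}
 *
 * TB_CIT_SETUP_DRV differs only in that ct_attrs comes from the backend's
 * own tb_##_name##_attrs table instead of a fixed attribute array.
 */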
extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;

static unsigned int target_devices;
static DEFINE_MUTEX(target_devices_lock);

static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);

char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];

static ssize_t target_core_item_dbroot_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", db_root);
}

static ssize_t target_core_item_dbroot_store(struct config_item *item,
		const char *page, size_t count)
{
	ssize_t read_bytes;
	struct file *fp;
	ssize_t r = -EINVAL;

	mutex_lock(&target_devices_lock);
	if (target_devices) {
		pr_err("db_root: cannot be changed because it's in use\n");
		goto unlock;
	}

	if (count > (DB_ROOT_LEN - 1)) {
		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
		       (int)count, DB_ROOT_LEN - 1);
		goto unlock;
	}

	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
	if (!read_bytes)
		goto unlock;

	if (db_root_stage[read_bytes - 1] == '\n')
		db_root_stage[read_bytes - 1] = '\0';

	/* validate new db root before accepting it */
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		goto unlock;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a directory: %s\n", db_root_stage);
		goto unlock;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, read_bytes);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

	r = read_bytes;

unlock:
	mutex_unlock(&target_devices_lock);
	return r;
}

CONFIGFS_ATTR(target_core_item_, dbroot);
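/*
 * Example usage (illustrative only, not from the original source): the
 * db_root attribute can be pointed at an alternate persistence directory
 * before any devices have been created, e.g.:
 *
 *	echo /mnt/alt-target-db > /sys/kernel/config/target/dbroot
 *
 * The store handler above rejects the change once target_devices is
 * non-zero, and only accepts a path that resolves to an existing directory.
 */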
static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		const char *cmp_name = tf->tf_ops->fabric_alias;
		if (!cmp_name)
			cmp_name = tf->tf_ops->fabric_name;
		if (!strcmp(cmp_name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}

/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto-loading logic for
		 * mkdir(2) system calls that use known TCM fabric module names.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->fabric_name);
	/*
	 * On a successful target_core_get_fabric() lookup, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}

/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
		" %s\n", tf->tf_ops->fabric_name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
		" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
297 */ 298 static struct configfs_attribute *target_core_fabric_item_attrs[] = { 299 &target_core_item_attr_version, 300 &target_core_item_attr_dbroot, 301 NULL, 302 }; 303 304 /* 305 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/ 306 */ 307 static const struct config_item_type target_core_fabrics_item = { 308 .ct_group_ops = &target_core_fabric_group_ops, 309 .ct_attrs = target_core_fabric_item_attrs, 310 .ct_owner = THIS_MODULE, 311 }; 312 313 static struct configfs_subsystem target_core_fabrics = { 314 .su_group = { 315 .cg_item = { 316 .ci_namebuf = "target", 317 .ci_type = &target_core_fabrics_item, 318 }, 319 }, 320 }; 321 322 int target_depend_item(struct config_item *item) 323 { 324 return configfs_depend_item(&target_core_fabrics, item); 325 } 326 EXPORT_SYMBOL(target_depend_item); 327 328 void target_undepend_item(struct config_item *item) 329 { 330 return configfs_undepend_item(item); 331 } 332 EXPORT_SYMBOL(target_undepend_item); 333 334 /*############################################################################## 335 // Start functions called by external Target Fabrics Modules 336 //############################################################################*/ 337 338 static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) 339 { 340 if (tfo->fabric_alias) { 341 if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) { 342 pr_err("Passed alias: %s exceeds " 343 "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias); 344 return -EINVAL; 345 } 346 } 347 if (!tfo->fabric_name) { 348 pr_err("Missing tfo->fabric_name\n"); 349 return -EINVAL; 350 } 351 if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) { 352 pr_err("Passed name: %s exceeds " 353 "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name); 354 return -EINVAL; 355 } 356 if (!tfo->tpg_get_wwn) { 357 pr_err("Missing tfo->tpg_get_wwn()\n"); 358 return -EINVAL; 359 } 360 if (!tfo->tpg_get_tag) { 361 pr_err("Missing tfo->tpg_get_tag()\n"); 362 return -EINVAL; 363 } 364 if (!tfo->tpg_check_demo_mode) { 365 pr_err("Missing tfo->tpg_check_demo_mode()\n"); 366 return -EINVAL; 367 } 368 if (!tfo->tpg_check_demo_mode_cache) { 369 pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); 370 return -EINVAL; 371 } 372 if (!tfo->tpg_check_demo_mode_write_protect) { 373 pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); 374 return -EINVAL; 375 } 376 if (!tfo->tpg_check_prod_mode_write_protect) { 377 pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); 378 return -EINVAL; 379 } 380 if (!tfo->tpg_get_inst_index) { 381 pr_err("Missing tfo->tpg_get_inst_index()\n"); 382 return -EINVAL; 383 } 384 if (!tfo->release_cmd) { 385 pr_err("Missing tfo->release_cmd()\n"); 386 return -EINVAL; 387 } 388 if (!tfo->sess_get_index) { 389 pr_err("Missing tfo->sess_get_index()\n"); 390 return -EINVAL; 391 } 392 if (!tfo->write_pending) { 393 pr_err("Missing tfo->write_pending()\n"); 394 return -EINVAL; 395 } 396 if (!tfo->set_default_node_attributes) { 397 pr_err("Missing tfo->set_default_node_attributes()\n"); 398 return -EINVAL; 399 } 400 if (!tfo->get_cmd_state) { 401 pr_err("Missing tfo->get_cmd_state()\n"); 402 return -EINVAL; 403 } 404 if (!tfo->queue_data_in) { 405 pr_err("Missing tfo->queue_data_in()\n"); 406 return -EINVAL; 407 } 408 if (!tfo->queue_status) { 409 pr_err("Missing tfo->queue_status()\n"); 410 return -EINVAL; 411 } 412 if (!tfo->queue_tm_rsp) { 413 pr_err("Missing tfo->queue_tm_rsp()\n"); 414 return -EINVAL; 415 } 416 if (!tfo->aborted_task) { 417 pr_err("Missing 
tfo->aborted_task()\n"); 418 return -EINVAL; 419 } 420 if (!tfo->check_stop_free) { 421 pr_err("Missing tfo->check_stop_free()\n"); 422 return -EINVAL; 423 } 424 /* 425 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() 426 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in 427 * target_core_fabric_configfs.c WWN+TPG group context code. 428 */ 429 if (!tfo->fabric_make_wwn) { 430 pr_err("Missing tfo->fabric_make_wwn()\n"); 431 return -EINVAL; 432 } 433 if (!tfo->fabric_drop_wwn) { 434 pr_err("Missing tfo->fabric_drop_wwn()\n"); 435 return -EINVAL; 436 } 437 if (!tfo->fabric_make_tpg) { 438 pr_err("Missing tfo->fabric_make_tpg()\n"); 439 return -EINVAL; 440 } 441 if (!tfo->fabric_drop_tpg) { 442 pr_err("Missing tfo->fabric_drop_tpg()\n"); 443 return -EINVAL; 444 } 445 446 return 0; 447 } 448 449 int target_register_template(const struct target_core_fabric_ops *fo) 450 { 451 struct target_fabric_configfs *tf; 452 int ret; 453 454 ret = target_fabric_tf_ops_check(fo); 455 if (ret) 456 return ret; 457 458 tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); 459 if (!tf) { 460 pr_err("%s: could not allocate memory!\n", __func__); 461 return -ENOMEM; 462 } 463 464 INIT_LIST_HEAD(&tf->tf_list); 465 atomic_set(&tf->tf_access_cnt, 0); 466 tf->tf_ops = fo; 467 target_fabric_setup_cits(tf); 468 469 mutex_lock(&g_tf_lock); 470 list_add_tail(&tf->tf_list, &g_tf_list); 471 mutex_unlock(&g_tf_lock); 472 473 return 0; 474 } 475 EXPORT_SYMBOL(target_register_template); 476 477 void target_unregister_template(const struct target_core_fabric_ops *fo) 478 { 479 struct target_fabric_configfs *t; 480 481 mutex_lock(&g_tf_lock); 482 list_for_each_entry(t, &g_tf_list, tf_list) { 483 if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) { 484 BUG_ON(atomic_read(&t->tf_access_cnt)); 485 list_del(&t->tf_list); 486 mutex_unlock(&g_tf_lock); 487 /* 488 * Wait for any outstanding fabric se_deve_entry->rcu_head 489 * callbacks to complete post kfree_rcu(), before allowing 490 * fabric driver unload of TFO->module to proceed. 
491 */ 492 rcu_barrier(); 493 kfree(t->tf_tpg_base_cit.ct_attrs); 494 kfree(t); 495 return; 496 } 497 } 498 mutex_unlock(&g_tf_lock); 499 } 500 EXPORT_SYMBOL(target_unregister_template); 501 502 /*############################################################################## 503 // Stop functions called by external Target Fabrics Modules 504 //############################################################################*/ 505 506 static inline struct se_dev_attrib *to_attrib(struct config_item *item) 507 { 508 return container_of(to_config_group(item), struct se_dev_attrib, 509 da_group); 510 } 511 512 /* Start functions for struct config_item_type tb_dev_attrib_cit */ 513 #define DEF_CONFIGFS_ATTRIB_SHOW(_name) \ 514 static ssize_t _name##_show(struct config_item *item, char *page) \ 515 { \ 516 return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \ 517 } 518 519 DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias); 520 DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo); 521 DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write); 522 DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read); 523 DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache); 524 DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl); 525 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas); 526 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu); 527 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws); 528 DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw); 529 DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc); 530 DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr); 531 DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type); 532 DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type); 533 DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify); 534 DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids); 535 DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot); 536 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord); 537 DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl); 538 DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size); 539 DEF_CONFIGFS_ATTRIB_SHOW(block_size); 540 DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors); 541 DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors); 542 DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth); 543 DEF_CONFIGFS_ATTRIB_SHOW(queue_depth); 544 DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count); 545 DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count); 546 DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity); 547 DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment); 548 DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data); 549 DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len); 550 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc); 551 552 #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \ 553 static ssize_t _name##_store(struct config_item *item, const char *page,\ 554 size_t count) \ 555 { \ 556 struct se_dev_attrib *da = to_attrib(item); \ 557 u32 val; \ 558 int ret; \ 559 \ 560 ret = kstrtou32(page, 0, &val); \ 561 if (ret < 0) \ 562 return ret; \ 563 da->_name = val; \ 564 return count; \ 565 } 566 567 DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count); 568 DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count); 569 DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity); 570 DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment); 571 DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len); 572 573 #define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \ 574 static ssize_t _name##_store(struct config_item *item, const char *page, \ 575 size_t count) \ 576 { \ 577 struct se_dev_attrib *da = to_attrib(item); \ 578 bool flag; \ 579 int ret; \ 580 \ 581 ret = strtobool(page, &flag); \ 582 if (ret < 0) \ 583 return ret; \ 584 da->_name = flag; \ 585 return count; \ 586 } 587 588 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write); 589 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw); 590 
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc); 591 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr); 592 DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids); 593 DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot); 594 595 #define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \ 596 static ssize_t _name##_store(struct config_item *item, const char *page,\ 597 size_t count) \ 598 { \ 599 printk_once(KERN_WARNING \ 600 "ignoring deprecated %s attribute\n", \ 601 __stringify(_name)); \ 602 return count; \ 603 } 604 605 DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo); 606 DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read); 607 608 static void dev_set_t10_wwn_model_alias(struct se_device *dev) 609 { 610 const char *configname; 611 612 configname = config_item_name(&dev->dev_group.cg_item); 613 if (strlen(configname) >= INQUIRY_MODEL_LEN) { 614 pr_warn("dev[%p]: Backstore name '%s' is too long for " 615 "INQUIRY_MODEL, truncating to 15 characters\n", dev, 616 configname); 617 } 618 /* 619 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1) 620 * here without potentially breaking existing setups, so continue to 621 * truncate one byte shorter than what can be carried in INQUIRY. 622 */ 623 strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN); 624 } 625 626 static ssize_t emulate_model_alias_store(struct config_item *item, 627 const char *page, size_t count) 628 { 629 struct se_dev_attrib *da = to_attrib(item); 630 struct se_device *dev = da->da_dev; 631 bool flag; 632 int ret; 633 634 if (dev->export_count) { 635 pr_err("dev[%p]: Unable to change model alias" 636 " while export_count is %d\n", 637 dev, dev->export_count); 638 return -EINVAL; 639 } 640 641 ret = strtobool(page, &flag); 642 if (ret < 0) 643 return ret; 644 645 BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); 646 if (flag) { 647 dev_set_t10_wwn_model_alias(dev); 648 } else { 649 strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, 650 sizeof(dev->t10_wwn.model)); 651 } 652 da->emulate_model_alias = flag; 653 return count; 654 } 655 656 static ssize_t emulate_write_cache_store(struct config_item *item, 657 const char *page, size_t count) 658 { 659 struct se_dev_attrib *da = to_attrib(item); 660 bool flag; 661 int ret; 662 663 ret = strtobool(page, &flag); 664 if (ret < 0) 665 return ret; 666 667 if (flag && da->da_dev->transport->get_write_cache) { 668 pr_err("emulate_write_cache not supported for this device\n"); 669 return -EINVAL; 670 } 671 672 da->emulate_write_cache = flag; 673 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 674 da->da_dev, flag); 675 return count; 676 } 677 678 static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item, 679 const char *page, size_t count) 680 { 681 struct se_dev_attrib *da = to_attrib(item); 682 u32 val; 683 int ret; 684 685 ret = kstrtou32(page, 0, &val); 686 if (ret < 0) 687 return ret; 688 689 if (val != TARGET_UA_INTLCK_CTRL_CLEAR 690 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR 691 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { 692 pr_err("Illegal value %d\n", val); 693 return -EINVAL; 694 } 695 696 if (da->da_dev->export_count) { 697 pr_err("dev[%p]: Unable to change SE Device" 698 " UA_INTRLCK_CTRL while export_count is %d\n", 699 da->da_dev, da->da_dev->export_count); 700 return -EINVAL; 701 } 702 da->emulate_ua_intlck_ctrl = val; 703 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 704 da->da_dev, val); 705 return count; 706 } 707 708 static ssize_t emulate_tas_store(struct config_item *item, 709 const char *page, size_t count) 710 { 711 
struct se_dev_attrib *da = to_attrib(item); 712 bool flag; 713 int ret; 714 715 ret = strtobool(page, &flag); 716 if (ret < 0) 717 return ret; 718 719 if (da->da_dev->export_count) { 720 pr_err("dev[%p]: Unable to change SE Device TAS while" 721 " export_count is %d\n", 722 da->da_dev, da->da_dev->export_count); 723 return -EINVAL; 724 } 725 da->emulate_tas = flag; 726 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 727 da->da_dev, flag ? "Enabled" : "Disabled"); 728 729 return count; 730 } 731 732 static ssize_t emulate_tpu_store(struct config_item *item, 733 const char *page, size_t count) 734 { 735 struct se_dev_attrib *da = to_attrib(item); 736 struct se_device *dev = da->da_dev; 737 bool flag; 738 int ret; 739 740 ret = strtobool(page, &flag); 741 if (ret < 0) 742 return ret; 743 744 /* 745 * We expect this value to be non-zero when generic Block Layer 746 * Discard supported is detected iblock_create_virtdevice(). 747 */ 748 if (flag && !da->max_unmap_block_desc_count) { 749 if (!dev->transport->configure_unmap || 750 !dev->transport->configure_unmap(dev)) { 751 pr_err("Generic Block Discard not supported\n"); 752 return -ENOSYS; 753 } 754 } 755 756 da->emulate_tpu = flag; 757 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 758 da->da_dev, flag); 759 return count; 760 } 761 762 static ssize_t emulate_tpws_store(struct config_item *item, 763 const char *page, size_t count) 764 { 765 struct se_dev_attrib *da = to_attrib(item); 766 struct se_device *dev = da->da_dev; 767 bool flag; 768 int ret; 769 770 ret = strtobool(page, &flag); 771 if (ret < 0) 772 return ret; 773 774 /* 775 * We expect this value to be non-zero when generic Block Layer 776 * Discard supported is detected iblock_create_virtdevice(). 777 */ 778 if (flag && !da->max_unmap_block_desc_count) { 779 if (!dev->transport->configure_unmap || 780 !dev->transport->configure_unmap(dev)) { 781 pr_err("Generic Block Discard not supported\n"); 782 return -ENOSYS; 783 } 784 } 785 786 da->emulate_tpws = flag; 787 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 788 da->da_dev, flag); 789 return count; 790 } 791 792 static ssize_t pi_prot_type_store(struct config_item *item, 793 const char *page, size_t count) 794 { 795 struct se_dev_attrib *da = to_attrib(item); 796 int old_prot = da->pi_prot_type, ret; 797 struct se_device *dev = da->da_dev; 798 u32 flag; 799 800 ret = kstrtou32(page, 0, &flag); 801 if (ret < 0) 802 return ret; 803 804 if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { 805 pr_err("Illegal value %d for pi_prot_type\n", flag); 806 return -EINVAL; 807 } 808 if (flag == 2) { 809 pr_err("DIF TYPE2 protection currently not supported\n"); 810 return -ENOSYS; 811 } 812 if (da->hw_pi_prot_type) { 813 pr_warn("DIF protection enabled on underlying hardware," 814 " ignoring\n"); 815 return count; 816 } 817 if (!dev->transport->init_prot || !dev->transport->free_prot) { 818 /* 0 is only allowed value for non-supporting backends */ 819 if (flag == 0) 820 return count; 821 822 pr_err("DIF protection not supported by backend: %s\n", 823 dev->transport->name); 824 return -ENOSYS; 825 } 826 if (!target_dev_configured(dev)) { 827 pr_err("DIF protection requires device to be configured\n"); 828 return -ENODEV; 829 } 830 if (dev->export_count) { 831 pr_err("dev[%p]: Unable to change SE Device PROT type while" 832 " export_count is %d\n", dev, dev->export_count); 833 return -EINVAL; 834 } 835 836 da->pi_prot_type = flag; 837 838 if (flag && !old_prot) { 839 ret = 
dev->transport->init_prot(dev); 840 if (ret) { 841 da->pi_prot_type = old_prot; 842 da->pi_prot_verify = (bool) da->pi_prot_type; 843 return ret; 844 } 845 846 } else if (!flag && old_prot) { 847 dev->transport->free_prot(dev); 848 } 849 850 da->pi_prot_verify = (bool) da->pi_prot_type; 851 pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); 852 return count; 853 } 854 855 /* always zero, but attr needs to remain RW to avoid userspace breakage */ 856 static ssize_t pi_prot_format_show(struct config_item *item, char *page) 857 { 858 return snprintf(page, PAGE_SIZE, "0\n"); 859 } 860 861 static ssize_t pi_prot_format_store(struct config_item *item, 862 const char *page, size_t count) 863 { 864 struct se_dev_attrib *da = to_attrib(item); 865 struct se_device *dev = da->da_dev; 866 bool flag; 867 int ret; 868 869 ret = strtobool(page, &flag); 870 if (ret < 0) 871 return ret; 872 873 if (!flag) 874 return count; 875 876 if (!dev->transport->format_prot) { 877 pr_err("DIF protection format not supported by backend %s\n", 878 dev->transport->name); 879 return -ENOSYS; 880 } 881 if (!target_dev_configured(dev)) { 882 pr_err("DIF protection format requires device to be configured\n"); 883 return -ENODEV; 884 } 885 if (dev->export_count) { 886 pr_err("dev[%p]: Unable to format SE Device PROT type while" 887 " export_count is %d\n", dev, dev->export_count); 888 return -EINVAL; 889 } 890 891 ret = dev->transport->format_prot(dev); 892 if (ret) 893 return ret; 894 895 pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); 896 return count; 897 } 898 899 static ssize_t pi_prot_verify_store(struct config_item *item, 900 const char *page, size_t count) 901 { 902 struct se_dev_attrib *da = to_attrib(item); 903 bool flag; 904 int ret; 905 906 ret = strtobool(page, &flag); 907 if (ret < 0) 908 return ret; 909 910 if (!flag) { 911 da->pi_prot_verify = flag; 912 return count; 913 } 914 if (da->hw_pi_prot_type) { 915 pr_warn("DIF protection enabled on underlying hardware," 916 " ignoring\n"); 917 return count; 918 } 919 if (!da->pi_prot_type) { 920 pr_warn("DIF protection not supported by backend, ignoring\n"); 921 return count; 922 } 923 da->pi_prot_verify = flag; 924 925 return count; 926 } 927 928 static ssize_t force_pr_aptpl_store(struct config_item *item, 929 const char *page, size_t count) 930 { 931 struct se_dev_attrib *da = to_attrib(item); 932 bool flag; 933 int ret; 934 935 ret = strtobool(page, &flag); 936 if (ret < 0) 937 return ret; 938 if (da->da_dev->export_count) { 939 pr_err("dev[%p]: Unable to set force_pr_aptpl while" 940 " export_count is %d\n", 941 da->da_dev, da->da_dev->export_count); 942 return -EINVAL; 943 } 944 945 da->force_pr_aptpl = flag; 946 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag); 947 return count; 948 } 949 950 static ssize_t emulate_rest_reord_store(struct config_item *item, 951 const char *page, size_t count) 952 { 953 struct se_dev_attrib *da = to_attrib(item); 954 bool flag; 955 int ret; 956 957 ret = strtobool(page, &flag); 958 if (ret < 0) 959 return ret; 960 961 if (flag != 0) { 962 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted" 963 " reordering not implemented\n", da->da_dev); 964 return -ENOSYS; 965 } 966 da->emulate_rest_reord = flag; 967 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", 968 da->da_dev, flag); 969 return count; 970 } 971 972 static ssize_t unmap_zeroes_data_store(struct config_item *item, 973 const char *page, size_t count) 974 { 975 struct se_dev_attrib *da = to_attrib(item); 
976 struct se_device *dev = da->da_dev; 977 bool flag; 978 int ret; 979 980 ret = strtobool(page, &flag); 981 if (ret < 0) 982 return ret; 983 984 if (da->da_dev->export_count) { 985 pr_err("dev[%p]: Unable to change SE Device" 986 " unmap_zeroes_data while export_count is %d\n", 987 da->da_dev, da->da_dev->export_count); 988 return -EINVAL; 989 } 990 /* 991 * We expect this value to be non-zero when generic Block Layer 992 * Discard supported is detected iblock_configure_device(). 993 */ 994 if (flag && !da->max_unmap_block_desc_count) { 995 if (!dev->transport->configure_unmap || 996 !dev->transport->configure_unmap(dev)) { 997 pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n", 998 da->da_dev); 999 return -ENOSYS; 1000 } 1001 } 1002 da->unmap_zeroes_data = flag; 1003 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", 1004 da->da_dev, flag); 1005 return count; 1006 } 1007 1008 /* 1009 * Note, this can only be called on unexported SE Device Object. 1010 */ 1011 static ssize_t queue_depth_store(struct config_item *item, 1012 const char *page, size_t count) 1013 { 1014 struct se_dev_attrib *da = to_attrib(item); 1015 struct se_device *dev = da->da_dev; 1016 u32 val; 1017 int ret; 1018 1019 ret = kstrtou32(page, 0, &val); 1020 if (ret < 0) 1021 return ret; 1022 1023 if (dev->export_count) { 1024 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1025 " export_count is %d\n", 1026 dev, dev->export_count); 1027 return -EINVAL; 1028 } 1029 if (!val) { 1030 pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev); 1031 return -EINVAL; 1032 } 1033 1034 if (val > dev->dev_attrib.queue_depth) { 1035 if (val > dev->dev_attrib.hw_queue_depth) { 1036 pr_err("dev[%p]: Passed queue_depth:" 1037 " %u exceeds TCM/SE_Device MAX" 1038 " TCQ: %u\n", dev, val, 1039 dev->dev_attrib.hw_queue_depth); 1040 return -EINVAL; 1041 } 1042 } 1043 da->queue_depth = dev->queue_depth = val; 1044 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val); 1045 return count; 1046 } 1047 1048 static ssize_t optimal_sectors_store(struct config_item *item, 1049 const char *page, size_t count) 1050 { 1051 struct se_dev_attrib *da = to_attrib(item); 1052 u32 val; 1053 int ret; 1054 1055 ret = kstrtou32(page, 0, &val); 1056 if (ret < 0) 1057 return ret; 1058 1059 if (da->da_dev->export_count) { 1060 pr_err("dev[%p]: Unable to change SE Device" 1061 " optimal_sectors while export_count is %d\n", 1062 da->da_dev, da->da_dev->export_count); 1063 return -EINVAL; 1064 } 1065 if (val > da->hw_max_sectors) { 1066 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1067 " greater than hw_max_sectors: %u\n", 1068 da->da_dev, val, da->hw_max_sectors); 1069 return -EINVAL; 1070 } 1071 1072 da->optimal_sectors = val; 1073 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1074 da->da_dev, val); 1075 return count; 1076 } 1077 1078 static ssize_t block_size_store(struct config_item *item, 1079 const char *page, size_t count) 1080 { 1081 struct se_dev_attrib *da = to_attrib(item); 1082 u32 val; 1083 int ret; 1084 1085 ret = kstrtou32(page, 0, &val); 1086 if (ret < 0) 1087 return ret; 1088 1089 if (da->da_dev->export_count) { 1090 pr_err("dev[%p]: Unable to change SE Device block_size" 1091 " while export_count is %d\n", 1092 da->da_dev, da->da_dev->export_count); 1093 return -EINVAL; 1094 } 1095 1096 if (val != 512 && val != 1024 && val != 2048 && val != 4096) { 1097 pr_err("dev[%p]: Illegal value for block_device: %u" 1098 " for SE device, must be 
512, 1024, 2048 or 4096\n", 1099 da->da_dev, val); 1100 return -EINVAL; 1101 } 1102 1103 da->block_size = val; 1104 if (da->max_bytes_per_io) 1105 da->hw_max_sectors = da->max_bytes_per_io / val; 1106 1107 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1108 da->da_dev, val); 1109 return count; 1110 } 1111 1112 static ssize_t alua_support_show(struct config_item *item, char *page) 1113 { 1114 struct se_dev_attrib *da = to_attrib(item); 1115 u8 flags = da->da_dev->transport_flags; 1116 1117 return snprintf(page, PAGE_SIZE, "%d\n", 1118 flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1); 1119 } 1120 1121 static ssize_t alua_support_store(struct config_item *item, 1122 const char *page, size_t count) 1123 { 1124 struct se_dev_attrib *da = to_attrib(item); 1125 struct se_device *dev = da->da_dev; 1126 bool flag, oldflag; 1127 int ret; 1128 1129 ret = strtobool(page, &flag); 1130 if (ret < 0) 1131 return ret; 1132 1133 oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA); 1134 if (flag == oldflag) 1135 return count; 1136 1137 if (!(dev->transport->transport_flags_changeable & 1138 TRANSPORT_FLAG_PASSTHROUGH_ALUA)) { 1139 pr_err("dev[%p]: Unable to change SE Device alua_support:" 1140 " alua_support has fixed value\n", dev); 1141 return -ENOSYS; 1142 } 1143 1144 if (flag) 1145 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA; 1146 else 1147 dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA; 1148 return count; 1149 } 1150 1151 static ssize_t pgr_support_show(struct config_item *item, char *page) 1152 { 1153 struct se_dev_attrib *da = to_attrib(item); 1154 u8 flags = da->da_dev->transport_flags; 1155 1156 return snprintf(page, PAGE_SIZE, "%d\n", 1157 flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1); 1158 } 1159 1160 static ssize_t pgr_support_store(struct config_item *item, 1161 const char *page, size_t count) 1162 { 1163 struct se_dev_attrib *da = to_attrib(item); 1164 struct se_device *dev = da->da_dev; 1165 bool flag, oldflag; 1166 int ret; 1167 1168 ret = strtobool(page, &flag); 1169 if (ret < 0) 1170 return ret; 1171 1172 oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR); 1173 if (flag == oldflag) 1174 return count; 1175 1176 if (!(dev->transport->transport_flags_changeable & 1177 TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 1178 pr_err("dev[%p]: Unable to change SE Device pgr_support:" 1179 " pgr_support has fixed value\n", dev); 1180 return -ENOSYS; 1181 } 1182 1183 if (flag) 1184 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR; 1185 else 1186 dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR; 1187 return count; 1188 } 1189 1190 static ssize_t emulate_rsoc_store(struct config_item *item, 1191 const char *page, size_t count) 1192 { 1193 struct se_dev_attrib *da = to_attrib(item); 1194 bool flag; 1195 int ret; 1196 1197 ret = strtobool(page, &flag); 1198 if (ret < 0) 1199 return ret; 1200 1201 da->emulate_rsoc = flag; 1202 pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n", 1203 da->da_dev, flag); 1204 return count; 1205 } 1206 1207 CONFIGFS_ATTR(, emulate_model_alias); 1208 CONFIGFS_ATTR(, emulate_dpo); 1209 CONFIGFS_ATTR(, emulate_fua_write); 1210 CONFIGFS_ATTR(, emulate_fua_read); 1211 CONFIGFS_ATTR(, emulate_write_cache); 1212 CONFIGFS_ATTR(, emulate_ua_intlck_ctrl); 1213 CONFIGFS_ATTR(, emulate_tas); 1214 CONFIGFS_ATTR(, emulate_tpu); 1215 CONFIGFS_ATTR(, emulate_tpws); 1216 CONFIGFS_ATTR(, emulate_caw); 1217 CONFIGFS_ATTR(, emulate_3pc); 1218 CONFIGFS_ATTR(, emulate_pr); 1219 CONFIGFS_ATTR(, 
emulate_rsoc); 1220 CONFIGFS_ATTR(, pi_prot_type); 1221 CONFIGFS_ATTR_RO(, hw_pi_prot_type); 1222 CONFIGFS_ATTR(, pi_prot_format); 1223 CONFIGFS_ATTR(, pi_prot_verify); 1224 CONFIGFS_ATTR(, enforce_pr_isids); 1225 CONFIGFS_ATTR(, is_nonrot); 1226 CONFIGFS_ATTR(, emulate_rest_reord); 1227 CONFIGFS_ATTR(, force_pr_aptpl); 1228 CONFIGFS_ATTR_RO(, hw_block_size); 1229 CONFIGFS_ATTR(, block_size); 1230 CONFIGFS_ATTR_RO(, hw_max_sectors); 1231 CONFIGFS_ATTR(, optimal_sectors); 1232 CONFIGFS_ATTR_RO(, hw_queue_depth); 1233 CONFIGFS_ATTR(, queue_depth); 1234 CONFIGFS_ATTR(, max_unmap_lba_count); 1235 CONFIGFS_ATTR(, max_unmap_block_desc_count); 1236 CONFIGFS_ATTR(, unmap_granularity); 1237 CONFIGFS_ATTR(, unmap_granularity_alignment); 1238 CONFIGFS_ATTR(, unmap_zeroes_data); 1239 CONFIGFS_ATTR(, max_write_same_len); 1240 CONFIGFS_ATTR(, alua_support); 1241 CONFIGFS_ATTR(, pgr_support); 1242 1243 /* 1244 * dev_attrib attributes for devices using the target core SBC/SPC 1245 * interpreter. Any backend using spc_parse_cdb should be using 1246 * these. 1247 */ 1248 struct configfs_attribute *sbc_attrib_attrs[] = { 1249 &attr_emulate_model_alias, 1250 &attr_emulate_dpo, 1251 &attr_emulate_fua_write, 1252 &attr_emulate_fua_read, 1253 &attr_emulate_write_cache, 1254 &attr_emulate_ua_intlck_ctrl, 1255 &attr_emulate_tas, 1256 &attr_emulate_tpu, 1257 &attr_emulate_tpws, 1258 &attr_emulate_caw, 1259 &attr_emulate_3pc, 1260 &attr_emulate_pr, 1261 &attr_pi_prot_type, 1262 &attr_hw_pi_prot_type, 1263 &attr_pi_prot_format, 1264 &attr_pi_prot_verify, 1265 &attr_enforce_pr_isids, 1266 &attr_is_nonrot, 1267 &attr_emulate_rest_reord, 1268 &attr_force_pr_aptpl, 1269 &attr_hw_block_size, 1270 &attr_block_size, 1271 &attr_hw_max_sectors, 1272 &attr_optimal_sectors, 1273 &attr_hw_queue_depth, 1274 &attr_queue_depth, 1275 &attr_max_unmap_lba_count, 1276 &attr_max_unmap_block_desc_count, 1277 &attr_unmap_granularity, 1278 &attr_unmap_granularity_alignment, 1279 &attr_unmap_zeroes_data, 1280 &attr_max_write_same_len, 1281 &attr_alua_support, 1282 &attr_pgr_support, 1283 &attr_emulate_rsoc, 1284 NULL, 1285 }; 1286 EXPORT_SYMBOL(sbc_attrib_attrs); 1287 1288 /* 1289 * Minimal dev_attrib attributes for devices passing through CDBs. 1290 * In this case we only provide a few read-only attributes for 1291 * backwards compatibility. 1292 */ 1293 struct configfs_attribute *passthrough_attrib_attrs[] = { 1294 &attr_hw_pi_prot_type, 1295 &attr_hw_block_size, 1296 &attr_hw_max_sectors, 1297 &attr_hw_queue_depth, 1298 &attr_emulate_pr, 1299 &attr_alua_support, 1300 &attr_pgr_support, 1301 NULL, 1302 }; 1303 EXPORT_SYMBOL(passthrough_attrib_attrs); 1304 1305 /* 1306 * pr related dev_attrib attributes for devices passing through CDBs, 1307 * but allowing in core pr emulation. 
1308 */ 1309 struct configfs_attribute *passthrough_pr_attrib_attrs[] = { 1310 &attr_enforce_pr_isids, 1311 &attr_force_pr_aptpl, 1312 NULL, 1313 }; 1314 EXPORT_SYMBOL(passthrough_pr_attrib_attrs); 1315 1316 TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL); 1317 TB_CIT_SETUP_DRV(dev_action, NULL, NULL); 1318 1319 /* End functions for struct config_item_type tb_dev_attrib_cit */ 1320 1321 /* Start functions for struct config_item_type tb_dev_wwn_cit */ 1322 1323 static struct t10_wwn *to_t10_wwn(struct config_item *item) 1324 { 1325 return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group); 1326 } 1327 1328 static ssize_t target_check_inquiry_data(char *buf) 1329 { 1330 size_t len; 1331 int i; 1332 1333 len = strlen(buf); 1334 1335 /* 1336 * SPC 4.3.1: 1337 * ASCII data fields shall contain only ASCII printable characters 1338 * (i.e., code values 20h to 7Eh) and may be terminated with one or 1339 * more ASCII null (00h) characters. 1340 */ 1341 for (i = 0; i < len; i++) { 1342 if (buf[i] < 0x20 || buf[i] > 0x7E) { 1343 pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n"); 1344 return -EINVAL; 1345 } 1346 } 1347 1348 return len; 1349 } 1350 1351 /* 1352 * STANDARD and VPD page 0x83 T10 Vendor Identification 1353 */ 1354 static ssize_t target_wwn_vendor_id_show(struct config_item *item, 1355 char *page) 1356 { 1357 return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]); 1358 } 1359 1360 static ssize_t target_wwn_vendor_id_store(struct config_item *item, 1361 const char *page, size_t count) 1362 { 1363 struct t10_wwn *t10_wwn = to_t10_wwn(item); 1364 struct se_device *dev = t10_wwn->t10_dev; 1365 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ 1366 unsigned char buf[INQUIRY_VENDOR_LEN + 2]; 1367 char *stripped = NULL; 1368 size_t len; 1369 ssize_t ret; 1370 1371 len = strlcpy(buf, page, sizeof(buf)); 1372 if (len < sizeof(buf)) { 1373 /* Strip any newline added from userspace. */ 1374 stripped = strstrip(buf); 1375 len = strlen(stripped); 1376 } 1377 if (len > INQUIRY_VENDOR_LEN) { 1378 pr_err("Emulated T10 Vendor Identification exceeds" 1379 " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN) 1380 "\n"); 1381 return -EOVERFLOW; 1382 } 1383 1384 ret = target_check_inquiry_data(stripped); 1385 1386 if (ret < 0) 1387 return ret; 1388 1389 /* 1390 * Check to see if any active exports exist. If they do exist, fail 1391 * here as changing this information on the fly (underneath the 1392 * initiator side OS dependent multipath code) could cause negative 1393 * effects. 
1394 */ 1395 if (dev->export_count) { 1396 pr_err("Unable to set T10 Vendor Identification while" 1397 " active %d exports exist\n", dev->export_count); 1398 return -EINVAL; 1399 } 1400 1401 BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1); 1402 strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); 1403 1404 pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:" 1405 " %s\n", dev->t10_wwn.vendor); 1406 1407 return count; 1408 } 1409 1410 static ssize_t target_wwn_product_id_show(struct config_item *item, 1411 char *page) 1412 { 1413 return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]); 1414 } 1415 1416 static ssize_t target_wwn_product_id_store(struct config_item *item, 1417 const char *page, size_t count) 1418 { 1419 struct t10_wwn *t10_wwn = to_t10_wwn(item); 1420 struct se_device *dev = t10_wwn->t10_dev; 1421 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ 1422 unsigned char buf[INQUIRY_MODEL_LEN + 2]; 1423 char *stripped = NULL; 1424 size_t len; 1425 ssize_t ret; 1426 1427 len = strlcpy(buf, page, sizeof(buf)); 1428 if (len < sizeof(buf)) { 1429 /* Strip any newline added from userspace. */ 1430 stripped = strstrip(buf); 1431 len = strlen(stripped); 1432 } 1433 if (len > INQUIRY_MODEL_LEN) { 1434 pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: " 1435 __stringify(INQUIRY_MODEL_LEN) 1436 "\n"); 1437 return -EOVERFLOW; 1438 } 1439 1440 ret = target_check_inquiry_data(stripped); 1441 1442 if (ret < 0) 1443 return ret; 1444 1445 /* 1446 * Check to see if any active exports exist. If they do exist, fail 1447 * here as changing this information on the fly (underneath the 1448 * initiator side OS dependent multipath code) could cause negative 1449 * effects. 1450 */ 1451 if (dev->export_count) { 1452 pr_err("Unable to set T10 Model while active %d exports exist\n", 1453 dev->export_count); 1454 return -EINVAL; 1455 } 1456 1457 BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); 1458 strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); 1459 1460 pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", 1461 dev->t10_wwn.model); 1462 1463 return count; 1464 } 1465 1466 static ssize_t target_wwn_revision_show(struct config_item *item, 1467 char *page) 1468 { 1469 return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]); 1470 } 1471 1472 static ssize_t target_wwn_revision_store(struct config_item *item, 1473 const char *page, size_t count) 1474 { 1475 struct t10_wwn *t10_wwn = to_t10_wwn(item); 1476 struct se_device *dev = t10_wwn->t10_dev; 1477 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */ 1478 unsigned char buf[INQUIRY_REVISION_LEN + 2]; 1479 char *stripped = NULL; 1480 size_t len; 1481 ssize_t ret; 1482 1483 len = strlcpy(buf, page, sizeof(buf)); 1484 if (len < sizeof(buf)) { 1485 /* Strip any newline added from userspace. */ 1486 stripped = strstrip(buf); 1487 len = strlen(stripped); 1488 } 1489 if (len > INQUIRY_REVISION_LEN) { 1490 pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: " 1491 __stringify(INQUIRY_REVISION_LEN) 1492 "\n"); 1493 return -EOVERFLOW; 1494 } 1495 1496 ret = target_check_inquiry_data(stripped); 1497 1498 if (ret < 0) 1499 return ret; 1500 1501 /* 1502 * Check to see if any active exports exist. If they do exist, fail 1503 * here as changing this information on the fly (underneath the 1504 * initiator side OS dependent multipath code) could cause negative 1505 * effects. 
1506 */ 1507 if (dev->export_count) { 1508 pr_err("Unable to set T10 Revision while active %d exports exist\n", 1509 dev->export_count); 1510 return -EINVAL; 1511 } 1512 1513 BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); 1514 strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); 1515 1516 pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", 1517 dev->t10_wwn.revision); 1518 1519 return count; 1520 } 1521 1522 static ssize_t 1523 target_wwn_company_id_show(struct config_item *item, 1524 char *page) 1525 { 1526 return snprintf(page, PAGE_SIZE, "%#08x\n", 1527 to_t10_wwn(item)->company_id); 1528 } 1529 1530 static ssize_t 1531 target_wwn_company_id_store(struct config_item *item, 1532 const char *page, size_t count) 1533 { 1534 struct t10_wwn *t10_wwn = to_t10_wwn(item); 1535 struct se_device *dev = t10_wwn->t10_dev; 1536 u32 val; 1537 int ret; 1538 1539 /* 1540 * The IEEE COMPANY_ID field should contain a 24-bit canonical 1541 * form OUI assigned by the IEEE. 1542 */ 1543 ret = kstrtou32(page, 0, &val); 1544 if (ret < 0) 1545 return ret; 1546 1547 if (val >= 0x1000000) 1548 return -EOVERFLOW; 1549 1550 /* 1551 * Check to see if any active exports exist. If they do exist, fail 1552 * here as changing this information on the fly (underneath the 1553 * initiator side OS dependent multipath code) could cause negative 1554 * effects. 1555 */ 1556 if (dev->export_count) { 1557 pr_err("Unable to set Company ID while %u exports exist\n", 1558 dev->export_count); 1559 return -EINVAL; 1560 } 1561 1562 t10_wwn->company_id = val; 1563 1564 pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n", 1565 t10_wwn->company_id); 1566 1567 return count; 1568 } 1569 1570 /* 1571 * VPD page 0x80 Unit serial 1572 */ 1573 static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item, 1574 char *page) 1575 { 1576 return sprintf(page, "T10 VPD Unit Serial Number: %s\n", 1577 &to_t10_wwn(item)->unit_serial[0]); 1578 } 1579 1580 static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item, 1581 const char *page, size_t count) 1582 { 1583 struct t10_wwn *t10_wwn = to_t10_wwn(item); 1584 struct se_device *dev = t10_wwn->t10_dev; 1585 unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { }; 1586 1587 /* 1588 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial 1589 * from the struct scsi_device level firmware, do not allow 1590 * VPD Unit Serial to be emulated. 1591 * 1592 * Note this struct scsi_device could also be emulating VPD 1593 * information from its drivers/scsi LLD. But for now we assume 1594 * it is doing 'the right thing' wrt a world wide unique 1595 * VPD Unit Serial Number that OS dependent multipath can depend on. 1596 */ 1597 if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) { 1598 pr_err("Underlying SCSI device firmware provided VPD" 1599 " Unit Serial, ignoring request\n"); 1600 return -EOPNOTSUPP; 1601 } 1602 1603 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { 1604 pr_err("Emulated VPD Unit Serial exceeds" 1605 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); 1606 return -EOVERFLOW; 1607 } 1608 /* 1609 * Check to see if any active $FABRIC_MOD exports exist. If they 1610 * do exist, fail here as changing this information on the fly 1611 * (underneath the initiator side OS dependent multipath code) 1612 * could cause negative effects. 
1613 */ 1614 if (dev->export_count) { 1615 pr_err("Unable to set VPD Unit Serial while" 1616 " active %d $FABRIC_MOD exports exist\n", 1617 dev->export_count); 1618 return -EINVAL; 1619 } 1620 1621 /* 1622 * This currently assumes ASCII encoding for emulated VPD Unit Serial. 1623 * 1624 * Also, strip any newline added from the userspace 1625 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial 1626 */ 1627 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); 1628 snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, 1629 "%s", strstrip(buf)); 1630 dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL; 1631 1632 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" 1633 " %s\n", dev->t10_wwn.unit_serial); 1634 1635 return count; 1636 } 1637 1638 /* 1639 * VPD page 0x83 Protocol Identifier 1640 */ 1641 static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item, 1642 char *page) 1643 { 1644 struct t10_wwn *t10_wwn = to_t10_wwn(item); 1645 struct t10_vpd *vpd; 1646 unsigned char buf[VPD_TMP_BUF_SIZE] = { }; 1647 ssize_t len = 0; 1648 1649 spin_lock(&t10_wwn->t10_vpd_lock); 1650 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { 1651 if (!vpd->protocol_identifier_set) 1652 continue; 1653 1654 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); 1655 1656 if (len + strlen(buf) >= PAGE_SIZE) 1657 break; 1658 1659 len += sprintf(page+len, "%s", buf); 1660 } 1661 spin_unlock(&t10_wwn->t10_vpd_lock); 1662 1663 return len; 1664 } 1665 1666 /* 1667 * Generic wrapper for dumping VPD identifiers by association. 1668 */ 1669 #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \ 1670 static ssize_t target_wwn_##_name##_show(struct config_item *item, \ 1671 char *page) \ 1672 { \ 1673 struct t10_wwn *t10_wwn = to_t10_wwn(item); \ 1674 struct t10_vpd *vpd; \ 1675 unsigned char buf[VPD_TMP_BUF_SIZE]; \ 1676 ssize_t len = 0; \ 1677 \ 1678 spin_lock(&t10_wwn->t10_vpd_lock); \ 1679 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ 1680 if (vpd->association != _assoc) \ 1681 continue; \ 1682 \ 1683 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 1684 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ 1685 if (len + strlen(buf) >= PAGE_SIZE) \ 1686 break; \ 1687 len += sprintf(page+len, "%s", buf); \ 1688 \ 1689 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 1690 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ 1691 if (len + strlen(buf) >= PAGE_SIZE) \ 1692 break; \ 1693 len += sprintf(page+len, "%s", buf); \ 1694 \ 1695 memset(buf, 0, VPD_TMP_BUF_SIZE); \ 1696 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ 1697 if (len + strlen(buf) >= PAGE_SIZE) \ 1698 break; \ 1699 len += sprintf(page+len, "%s", buf); \ 1700 } \ 1701 spin_unlock(&t10_wwn->t10_vpd_lock); \ 1702 \ 1703 return len; \ 1704 } 1705 1706 /* VPD page 0x83 Association: Logical Unit */ 1707 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); 1708 /* VPD page 0x83 Association: Target Port */ 1709 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); 1710 /* VPD page 0x83 Association: SCSI Target Device */ 1711 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); 1712 1713 CONFIGFS_ATTR(target_wwn_, vendor_id); 1714 CONFIGFS_ATTR(target_wwn_, product_id); 1715 CONFIGFS_ATTR(target_wwn_, revision); 1716 CONFIGFS_ATTR(target_wwn_, company_id); 1717 CONFIGFS_ATTR(target_wwn_, vpd_unit_serial); 1718 CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier); 1719 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit); 1720 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port); 1721 
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device); 1722 1723 static struct configfs_attribute *target_core_dev_wwn_attrs[] = { 1724 &target_wwn_attr_vendor_id, 1725 &target_wwn_attr_product_id, 1726 &target_wwn_attr_revision, 1727 &target_wwn_attr_company_id, 1728 &target_wwn_attr_vpd_unit_serial, 1729 &target_wwn_attr_vpd_protocol_identifier, 1730 &target_wwn_attr_vpd_assoc_logical_unit, 1731 &target_wwn_attr_vpd_assoc_target_port, 1732 &target_wwn_attr_vpd_assoc_scsi_target_device, 1733 NULL, 1734 }; 1735 1736 TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs); 1737 1738 /* End functions for struct config_item_type tb_dev_wwn_cit */ 1739 1740 /* Start functions for struct config_item_type tb_dev_pr_cit */ 1741 1742 static struct se_device *pr_to_dev(struct config_item *item) 1743 { 1744 return container_of(to_config_group(item), struct se_device, 1745 dev_pr_group); 1746 } 1747 1748 static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, 1749 char *page) 1750 { 1751 struct se_node_acl *se_nacl; 1752 struct t10_pr_registration *pr_reg; 1753 char i_buf[PR_REG_ISID_ID_LEN] = { }; 1754 1755 pr_reg = dev->dev_pr_res_holder; 1756 if (!pr_reg) 1757 return sprintf(page, "No SPC-3 Reservation holder\n"); 1758 1759 se_nacl = pr_reg->pr_reg_nacl; 1760 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 1761 1762 return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", 1763 se_nacl->se_tpg->se_tpg_tfo->fabric_name, 1764 se_nacl->initiatorname, i_buf); 1765 } 1766 1767 static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev, 1768 char *page) 1769 { 1770 struct se_session *sess = dev->reservation_holder; 1771 struct se_node_acl *se_nacl; 1772 ssize_t len; 1773 1774 if (sess) { 1775 se_nacl = sess->se_node_acl; 1776 len = sprintf(page, 1777 "SPC-2 Reservation: %s Initiator: %s\n", 1778 se_nacl->se_tpg->se_tpg_tfo->fabric_name, 1779 se_nacl->initiatorname); 1780 } else { 1781 len = sprintf(page, "No SPC-2 Reservation holder\n"); 1782 } 1783 return len; 1784 } 1785 1786 static ssize_t target_pr_res_holder_show(struct config_item *item, char *page) 1787 { 1788 struct se_device *dev = pr_to_dev(item); 1789 int ret; 1790 1791 if (!dev->dev_attrib.emulate_pr) 1792 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n"); 1793 1794 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1795 return sprintf(page, "Passthrough\n"); 1796 1797 spin_lock(&dev->dev_reservation_lock); 1798 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1799 ret = target_core_dev_pr_show_spc2_res(dev, page); 1800 else 1801 ret = target_core_dev_pr_show_spc3_res(dev, page); 1802 spin_unlock(&dev->dev_reservation_lock); 1803 return ret; 1804 } 1805 1806 static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item, 1807 char *page) 1808 { 1809 struct se_device *dev = pr_to_dev(item); 1810 ssize_t len = 0; 1811 1812 spin_lock(&dev->dev_reservation_lock); 1813 if (!dev->dev_pr_res_holder) { 1814 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1815 } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) { 1816 len = sprintf(page, "SPC-3 Reservation: All Target" 1817 " Ports registration\n"); 1818 } else { 1819 len = sprintf(page, "SPC-3 Reservation: Single" 1820 " Target Port registration\n"); 1821 } 1822 1823 spin_unlock(&dev->dev_reservation_lock); 1824 return len; 1825 } 1826 1827 static ssize_t target_pr_res_pr_generation_show(struct config_item *item, 1828 char *page) 1829 { 1830 return sprintf(page, "0x%08x\n", 
pr_to_dev(item)->t10_pr.pr_generation); 1831 } 1832 1833 1834 static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item, 1835 char *page) 1836 { 1837 struct se_device *dev = pr_to_dev(item); 1838 struct se_node_acl *se_nacl; 1839 struct se_portal_group *se_tpg; 1840 struct t10_pr_registration *pr_reg; 1841 const struct target_core_fabric_ops *tfo; 1842 ssize_t len = 0; 1843 1844 spin_lock(&dev->dev_reservation_lock); 1845 pr_reg = dev->dev_pr_res_holder; 1846 if (!pr_reg) { 1847 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1848 goto out_unlock; 1849 } 1850 1851 se_nacl = pr_reg->pr_reg_nacl; 1852 se_tpg = se_nacl->se_tpg; 1853 tfo = se_tpg->se_tpg_tfo; 1854 1855 len += sprintf(page+len, "SPC-3 Reservation: %s" 1856 " Target Node Endpoint: %s\n", tfo->fabric_name, 1857 tfo->tpg_get_wwn(se_tpg)); 1858 len += sprintf(page+len, "SPC-3 Reservation: Relative Port" 1859 " Identifier Tag: %hu %s Portal Group Tag: %hu" 1860 " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi, 1861 tfo->fabric_name, tfo->tpg_get_tag(se_tpg), 1862 tfo->fabric_name, pr_reg->pr_aptpl_target_lun); 1863 1864 out_unlock: 1865 spin_unlock(&dev->dev_reservation_lock); 1866 return len; 1867 } 1868 1869 1870 static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item, 1871 char *page) 1872 { 1873 struct se_device *dev = pr_to_dev(item); 1874 const struct target_core_fabric_ops *tfo; 1875 struct t10_pr_registration *pr_reg; 1876 unsigned char buf[384]; 1877 char i_buf[PR_REG_ISID_ID_LEN]; 1878 ssize_t len = 0; 1879 int reg_count = 0; 1880 1881 len += sprintf(page+len, "SPC-3 PR Registrations:\n"); 1882 1883 spin_lock(&dev->t10_pr.registration_lock); 1884 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, 1885 pr_reg_list) { 1886 1887 memset(buf, 0, 384); 1888 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 1889 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; 1890 core_pr_dump_initiator_port(pr_reg, i_buf, 1891 PR_REG_ISID_ID_LEN); 1892 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", 1893 tfo->fabric_name, 1894 pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key, 1895 pr_reg->pr_res_generation); 1896 1897 if (len + strlen(buf) >= PAGE_SIZE) 1898 break; 1899 1900 len += sprintf(page+len, "%s", buf); 1901 reg_count++; 1902 } 1903 spin_unlock(&dev->t10_pr.registration_lock); 1904 1905 if (!reg_count) 1906 len += sprintf(page+len, "None\n"); 1907 1908 return len; 1909 } 1910 1911 static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page) 1912 { 1913 struct se_device *dev = pr_to_dev(item); 1914 struct t10_pr_registration *pr_reg; 1915 ssize_t len = 0; 1916 1917 spin_lock(&dev->dev_reservation_lock); 1918 pr_reg = dev->dev_pr_res_holder; 1919 if (pr_reg) { 1920 len = sprintf(page, "SPC-3 Reservation Type: %s\n", 1921 core_scsi3_pr_dump_type(pr_reg->pr_res_type)); 1922 } else { 1923 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1924 } 1925 1926 spin_unlock(&dev->dev_reservation_lock); 1927 return len; 1928 } 1929 1930 static ssize_t target_pr_res_type_show(struct config_item *item, char *page) 1931 { 1932 struct se_device *dev = pr_to_dev(item); 1933 1934 if (!dev->dev_attrib.emulate_pr) 1935 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n"); 1936 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 1937 return sprintf(page, "SPC_PASSTHROUGH\n"); 1938 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1939 return sprintf(page, "SPC2_RESERVATIONS\n"); 1940 1941 return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); 1942 } 1943 
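/*
 * Illustrative example (not from the original source): the res_aptpl_metadata
 * attribute defined further below accepts comma- or newline-separated
 * key=value tokens describing one APTPL registration, for instance:
 *
 *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
 *	sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,
 *	target_fabric=iSCSI,target_node=iqn.2003-01.org.example:target,
 *	tpgt=1,target_lun=0
 *
 * written via echo into $HBA/$DEV/pr/res_aptpl_metadata. At a minimum,
 * initiator_node=, target_node= and a non-zero sa_res_key= must be supplied,
 * otherwise the store handler rejects the registration; res_holder=1 also
 * requires a non-zero res_type=.
 */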
1944 static ssize_t target_pr_res_aptpl_active_show(struct config_item *item, 1945 char *page) 1946 { 1947 struct se_device *dev = pr_to_dev(item); 1948 1949 if (!dev->dev_attrib.emulate_pr || 1950 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 1951 return 0; 1952 1953 return sprintf(page, "APTPL Bit Status: %s\n", 1954 (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); 1955 } 1956 1957 static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item, 1958 char *page) 1959 { 1960 struct se_device *dev = pr_to_dev(item); 1961 1962 if (!dev->dev_attrib.emulate_pr || 1963 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 1964 return 0; 1965 1966 return sprintf(page, "Ready to process PR APTPL metadata..\n"); 1967 } 1968 1969 enum { 1970 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, 1971 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, 1972 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, 1973 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err 1974 }; 1975 1976 static match_table_t tokens = { 1977 {Opt_initiator_fabric, "initiator_fabric=%s"}, 1978 {Opt_initiator_node, "initiator_node=%s"}, 1979 {Opt_initiator_sid, "initiator_sid=%s"}, 1980 {Opt_sa_res_key, "sa_res_key=%s"}, 1981 {Opt_res_holder, "res_holder=%d"}, 1982 {Opt_res_type, "res_type=%d"}, 1983 {Opt_res_scope, "res_scope=%d"}, 1984 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, 1985 {Opt_mapped_lun, "mapped_lun=%u"}, 1986 {Opt_target_fabric, "target_fabric=%s"}, 1987 {Opt_target_node, "target_node=%s"}, 1988 {Opt_tpgt, "tpgt=%d"}, 1989 {Opt_port_rtpi, "port_rtpi=%d"}, 1990 {Opt_target_lun, "target_lun=%u"}, 1991 {Opt_err, NULL} 1992 }; 1993 1994 static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item, 1995 const char *page, size_t count) 1996 { 1997 struct se_device *dev = pr_to_dev(item); 1998 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1999 unsigned char *t_fabric = NULL, *t_port = NULL; 2000 char *orig, *ptr, *opts; 2001 substring_t args[MAX_OPT_ARGS]; 2002 unsigned long long tmp_ll; 2003 u64 sa_res_key = 0; 2004 u64 mapped_lun = 0, target_lun = 0; 2005 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; 2006 u16 tpgt = 0; 2007 u8 type = 0; 2008 2009 if (!dev->dev_attrib.emulate_pr || 2010 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 2011 return count; 2012 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 2013 return count; 2014 2015 if (dev->export_count) { 2016 pr_debug("Unable to process APTPL metadata while" 2017 " active fabric exports exist\n"); 2018 return -EINVAL; 2019 } 2020 2021 opts = kstrdup(page, GFP_KERNEL); 2022 if (!opts) 2023 return -ENOMEM; 2024 2025 orig = opts; 2026 while ((ptr = strsep(&opts, ",\n")) != NULL) { 2027 if (!*ptr) 2028 continue; 2029 2030 token = match_token(ptr, tokens, args); 2031 switch (token) { 2032 case Opt_initiator_fabric: 2033 i_fabric = match_strdup(args); 2034 if (!i_fabric) { 2035 ret = -ENOMEM; 2036 goto out; 2037 } 2038 break; 2039 case Opt_initiator_node: 2040 i_port = match_strdup(args); 2041 if (!i_port) { 2042 ret = -ENOMEM; 2043 goto out; 2044 } 2045 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { 2046 pr_err("APTPL metadata initiator_node=" 2047 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 2048 PR_APTPL_MAX_IPORT_LEN); 2049 ret = -EINVAL; 2050 break; 2051 } 2052 break; 2053 case Opt_initiator_sid: 2054 isid = match_strdup(args); 2055 if (!isid) { 2056 ret = -ENOMEM; 2057 goto out; 2058 } 2059 if (strlen(isid) >= PR_REG_ISID_LEN) { 2060 
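				/* isid, including the terminating NUL, must fit in PR_REG_ISID_LEN */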
pr_err("APTPL metadata initiator_isid" 2061 "= exceeds PR_REG_ISID_LEN: %d\n", 2062 PR_REG_ISID_LEN); 2063 ret = -EINVAL; 2064 break; 2065 } 2066 break; 2067 case Opt_sa_res_key: 2068 ret = match_u64(args, &tmp_ll); 2069 if (ret < 0) { 2070 pr_err("kstrtoull() failed for sa_res_key=\n"); 2071 goto out; 2072 } 2073 sa_res_key = (u64)tmp_ll; 2074 break; 2075 /* 2076 * PR APTPL Metadata for Reservation 2077 */ 2078 case Opt_res_holder: 2079 ret = match_int(args, &arg); 2080 if (ret) 2081 goto out; 2082 res_holder = arg; 2083 break; 2084 case Opt_res_type: 2085 ret = match_int(args, &arg); 2086 if (ret) 2087 goto out; 2088 type = (u8)arg; 2089 break; 2090 case Opt_res_scope: 2091 ret = match_int(args, &arg); 2092 if (ret) 2093 goto out; 2094 break; 2095 case Opt_res_all_tg_pt: 2096 ret = match_int(args, &arg); 2097 if (ret) 2098 goto out; 2099 all_tg_pt = (int)arg; 2100 break; 2101 case Opt_mapped_lun: 2102 ret = match_u64(args, &tmp_ll); 2103 if (ret) 2104 goto out; 2105 mapped_lun = (u64)tmp_ll; 2106 break; 2107 /* 2108 * PR APTPL Metadata for Target Port 2109 */ 2110 case Opt_target_fabric: 2111 t_fabric = match_strdup(args); 2112 if (!t_fabric) { 2113 ret = -ENOMEM; 2114 goto out; 2115 } 2116 break; 2117 case Opt_target_node: 2118 t_port = match_strdup(args); 2119 if (!t_port) { 2120 ret = -ENOMEM; 2121 goto out; 2122 } 2123 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { 2124 pr_err("APTPL metadata target_node=" 2125 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 2126 PR_APTPL_MAX_TPORT_LEN); 2127 ret = -EINVAL; 2128 break; 2129 } 2130 break; 2131 case Opt_tpgt: 2132 ret = match_int(args, &arg); 2133 if (ret) 2134 goto out; 2135 tpgt = (u16)arg; 2136 break; 2137 case Opt_port_rtpi: 2138 ret = match_int(args, &arg); 2139 if (ret) 2140 goto out; 2141 break; 2142 case Opt_target_lun: 2143 ret = match_u64(args, &tmp_ll); 2144 if (ret) 2145 goto out; 2146 target_lun = (u64)tmp_ll; 2147 break; 2148 default: 2149 break; 2150 } 2151 } 2152 2153 if (!i_port || !t_port || !sa_res_key) { 2154 pr_err("Illegal parameters for APTPL registration\n"); 2155 ret = -EINVAL; 2156 goto out; 2157 } 2158 2159 if (res_holder && !(type)) { 2160 pr_err("Illegal PR type: 0x%02x for reservation" 2161 " holder\n", type); 2162 ret = -EINVAL; 2163 goto out; 2164 } 2165 2166 ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key, 2167 i_port, isid, mapped_lun, t_port, tpgt, target_lun, 2168 res_holder, all_tg_pt, type); 2169 out: 2170 kfree(i_fabric); 2171 kfree(i_port); 2172 kfree(isid); 2173 kfree(t_fabric); 2174 kfree(t_port); 2175 kfree(orig); 2176 return (ret == 0) ? 
count : ret; 2177 } 2178 2179 2180 CONFIGFS_ATTR_RO(target_pr_, res_holder); 2181 CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts); 2182 CONFIGFS_ATTR_RO(target_pr_, res_pr_generation); 2183 CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port); 2184 CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts); 2185 CONFIGFS_ATTR_RO(target_pr_, res_pr_type); 2186 CONFIGFS_ATTR_RO(target_pr_, res_type); 2187 CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active); 2188 CONFIGFS_ATTR(target_pr_, res_aptpl_metadata); 2189 2190 static struct configfs_attribute *target_core_dev_pr_attrs[] = { 2191 &target_pr_attr_res_holder, 2192 &target_pr_attr_res_pr_all_tgt_pts, 2193 &target_pr_attr_res_pr_generation, 2194 &target_pr_attr_res_pr_holder_tg_port, 2195 &target_pr_attr_res_pr_registered_i_pts, 2196 &target_pr_attr_res_pr_type, 2197 &target_pr_attr_res_type, 2198 &target_pr_attr_res_aptpl_active, 2199 &target_pr_attr_res_aptpl_metadata, 2200 NULL, 2201 }; 2202 2203 TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs); 2204 2205 /* End functions for struct config_item_type tb_dev_pr_cit */ 2206 2207 /* Start functions for struct config_item_type tb_dev_cit */ 2208 2209 static inline struct se_device *to_device(struct config_item *item) 2210 { 2211 return container_of(to_config_group(item), struct se_device, dev_group); 2212 } 2213 2214 static ssize_t target_dev_info_show(struct config_item *item, char *page) 2215 { 2216 struct se_device *dev = to_device(item); 2217 int bl = 0; 2218 ssize_t read_bytes = 0; 2219 2220 transport_dump_dev_state(dev, page, &bl); 2221 read_bytes += bl; 2222 read_bytes += dev->transport->show_configfs_dev_params(dev, 2223 page+read_bytes); 2224 return read_bytes; 2225 } 2226 2227 static ssize_t target_dev_control_store(struct config_item *item, 2228 const char *page, size_t count) 2229 { 2230 struct se_device *dev = to_device(item); 2231 2232 return dev->transport->set_configfs_dev_params(dev, page, count); 2233 } 2234 2235 static ssize_t target_dev_alias_show(struct config_item *item, char *page) 2236 { 2237 struct se_device *dev = to_device(item); 2238 2239 if (!(dev->dev_flags & DF_USING_ALIAS)) 2240 return 0; 2241 2242 return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); 2243 } 2244 2245 static ssize_t target_dev_alias_store(struct config_item *item, 2246 const char *page, size_t count) 2247 { 2248 struct se_device *dev = to_device(item); 2249 struct se_hba *hba = dev->se_hba; 2250 ssize_t read_bytes; 2251 2252 if (count > (SE_DEV_ALIAS_LEN-1)) { 2253 pr_err("alias count: %d exceeds" 2254 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, 2255 SE_DEV_ALIAS_LEN-1); 2256 return -EINVAL; 2257 } 2258 2259 read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); 2260 if (!read_bytes) 2261 return -EINVAL; 2262 if (dev->dev_alias[read_bytes - 1] == '\n') 2263 dev->dev_alias[read_bytes - 1] = '\0'; 2264 2265 dev->dev_flags |= DF_USING_ALIAS; 2266 2267 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 2268 config_item_name(&hba->hba_group.cg_item), 2269 config_item_name(&dev->dev_group.cg_item), 2270 dev->dev_alias); 2271 2272 return read_bytes; 2273 } 2274 2275 static ssize_t target_dev_udev_path_show(struct config_item *item, char *page) 2276 { 2277 struct se_device *dev = to_device(item); 2278 2279 if (!(dev->dev_flags & DF_USING_UDEV_PATH)) 2280 return 0; 2281 2282 return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); 2283 } 2284 2285 static ssize_t target_dev_udev_path_store(struct config_item *item, 2286 const char *page, size_t count) 2287 { 2288 struct 
se_device *dev = to_device(item); 2289 struct se_hba *hba = dev->se_hba; 2290 ssize_t read_bytes; 2291 2292 if (count > (SE_UDEV_PATH_LEN-1)) { 2293 pr_err("udev_path count: %d exceeds" 2294 " SE_UDEV_PATH_LEN-1: %u\n", (int)count, 2295 SE_UDEV_PATH_LEN-1); 2296 return -EINVAL; 2297 } 2298 2299 read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN, 2300 "%s", page); 2301 if (!read_bytes) 2302 return -EINVAL; 2303 if (dev->udev_path[read_bytes - 1] == '\n') 2304 dev->udev_path[read_bytes - 1] = '\0'; 2305 2306 dev->dev_flags |= DF_USING_UDEV_PATH; 2307 2308 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 2309 config_item_name(&hba->hba_group.cg_item), 2310 config_item_name(&dev->dev_group.cg_item), 2311 dev->udev_path); 2312 2313 return read_bytes; 2314 } 2315 2316 static ssize_t target_dev_enable_show(struct config_item *item, char *page) 2317 { 2318 struct se_device *dev = to_device(item); 2319 2320 return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev)); 2321 } 2322 2323 static ssize_t target_dev_enable_store(struct config_item *item, 2324 const char *page, size_t count) 2325 { 2326 struct se_device *dev = to_device(item); 2327 char *ptr; 2328 int ret; 2329 2330 ptr = strstr(page, "1"); 2331 if (!ptr) { 2332 pr_err("For dev_enable ops, only valid value" 2333 " is \"1\"\n"); 2334 return -EINVAL; 2335 } 2336 2337 ret = target_configure_device(dev); 2338 if (ret) 2339 return ret; 2340 return count; 2341 } 2342 2343 static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page) 2344 { 2345 struct se_device *dev = to_device(item); 2346 struct config_item *lu_ci; 2347 struct t10_alua_lu_gp *lu_gp; 2348 struct t10_alua_lu_gp_member *lu_gp_mem; 2349 ssize_t len = 0; 2350 2351 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2352 if (!lu_gp_mem) 2353 return 0; 2354 2355 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2356 lu_gp = lu_gp_mem->lu_gp; 2357 if (lu_gp) { 2358 lu_ci = &lu_gp->lu_gp_group.cg_item; 2359 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", 2360 config_item_name(lu_ci), lu_gp->lu_gp_id); 2361 } 2362 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2363 2364 return len; 2365 } 2366 2367 static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, 2368 const char *page, size_t count) 2369 { 2370 struct se_device *dev = to_device(item); 2371 struct se_hba *hba = dev->se_hba; 2372 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 2373 struct t10_alua_lu_gp_member *lu_gp_mem; 2374 unsigned char buf[LU_GROUP_NAME_BUF] = { }; 2375 int move = 0; 2376 2377 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2378 if (!lu_gp_mem) 2379 return count; 2380 2381 if (count > LU_GROUP_NAME_BUF) { 2382 pr_err("ALUA LU Group Alias too large!\n"); 2383 return -EINVAL; 2384 } 2385 memcpy(buf, page, count); 2386 /* 2387 * Any ALUA logical unit alias besides "NULL" means we will be 2388 * making a new group association. 2389 */ 2390 if (strcmp(strstrip(buf), "NULL")) { 2391 /* 2392 * core_alua_get_lu_gp_by_name() will increment reference to 2393 * struct t10_alua_lu_gp. This reference is released with 2394 * core_alua_get_lu_gp_by_name below(). 
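	 * (The reference is dropped via core_alua_put_lu_gp_from_name() at
	 * the end of this function.)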
2395 */ 2396 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); 2397 if (!lu_gp_new) 2398 return -ENODEV; 2399 } 2400 2401 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2402 lu_gp = lu_gp_mem->lu_gp; 2403 if (lu_gp) { 2404 /* 2405 * Clearing an existing lu_gp association, and replacing 2406 * with NULL 2407 */ 2408 if (!lu_gp_new) { 2409 pr_debug("Target_Core_ConfigFS: Releasing %s/%s" 2410 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 2411 " %hu\n", 2412 config_item_name(&hba->hba_group.cg_item), 2413 config_item_name(&dev->dev_group.cg_item), 2414 config_item_name(&lu_gp->lu_gp_group.cg_item), 2415 lu_gp->lu_gp_id); 2416 2417 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); 2418 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2419 2420 return count; 2421 } 2422 /* 2423 * Removing existing association of lu_gp_mem with lu_gp 2424 */ 2425 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); 2426 move = 1; 2427 } 2428 /* 2429 * Associate lu_gp_mem with lu_gp_new. 2430 */ 2431 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); 2432 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2433 2434 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" 2435 " core/alua/lu_gps/%s, ID: %hu\n", 2436 (move) ? "Moving" : "Adding", 2437 config_item_name(&hba->hba_group.cg_item), 2438 config_item_name(&dev->dev_group.cg_item), 2439 config_item_name(&lu_gp_new->lu_gp_group.cg_item), 2440 lu_gp_new->lu_gp_id); 2441 2442 core_alua_put_lu_gp_from_name(lu_gp_new); 2443 return count; 2444 } 2445 2446 static ssize_t target_dev_lba_map_show(struct config_item *item, char *page) 2447 { 2448 struct se_device *dev = to_device(item); 2449 struct t10_alua_lba_map *map; 2450 struct t10_alua_lba_map_member *mem; 2451 char *b = page; 2452 int bl = 0; 2453 char state; 2454 2455 spin_lock(&dev->t10_alua.lba_map_lock); 2456 if (!list_empty(&dev->t10_alua.lba_map_list)) 2457 bl += sprintf(b + bl, "%u %u\n", 2458 dev->t10_alua.lba_map_segment_size, 2459 dev->t10_alua.lba_map_segment_multiplier); 2460 list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { 2461 bl += sprintf(b + bl, "%llu %llu", 2462 map->lba_map_first_lba, map->lba_map_last_lba); 2463 list_for_each_entry(mem, &map->lba_map_mem_list, 2464 lba_map_mem_list) { 2465 switch (mem->lba_map_mem_alua_state) { 2466 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: 2467 state = 'O'; 2468 break; 2469 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 2470 state = 'A'; 2471 break; 2472 case ALUA_ACCESS_STATE_STANDBY: 2473 state = 'S'; 2474 break; 2475 case ALUA_ACCESS_STATE_UNAVAILABLE: 2476 state = 'U'; 2477 break; 2478 default: 2479 state = '.'; 2480 break; 2481 } 2482 bl += sprintf(b + bl, " %d:%c", 2483 mem->lba_map_mem_alua_pg_id, state); 2484 } 2485 bl += sprintf(b + bl, "\n"); 2486 } 2487 spin_unlock(&dev->t10_alua.lba_map_lock); 2488 return bl; 2489 } 2490 2491 static ssize_t target_dev_lba_map_store(struct config_item *item, 2492 const char *page, size_t count) 2493 { 2494 struct se_device *dev = to_device(item); 2495 struct t10_alua_lba_map *lba_map = NULL; 2496 struct list_head lba_list; 2497 char *map_entries, *orig, *ptr; 2498 char state; 2499 int pg_num = -1, pg; 2500 int ret = 0, num = 0, pg_id, alua_state; 2501 unsigned long start_lba = -1, end_lba = -1; 2502 unsigned long segment_size = -1, segment_mult = -1; 2503 2504 orig = map_entries = kstrdup(page, GFP_KERNEL); 2505 if (!map_entries) 2506 return -ENOMEM; 2507 2508 INIT_LIST_HEAD(&lba_list); 2509 while ((ptr = strsep(&map_entries, "\n")) != NULL) { 2510 if (!*ptr) 2511 continue; 2512 2513 if (num == 0) { 2514 if 
(sscanf(ptr, "%lu %lu\n", 2515 &segment_size, &segment_mult) != 2) { 2516 pr_err("Invalid line %d\n", num); 2517 ret = -EINVAL; 2518 break; 2519 } 2520 num++; 2521 continue; 2522 } 2523 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) { 2524 pr_err("Invalid line %d\n", num); 2525 ret = -EINVAL; 2526 break; 2527 } 2528 ptr = strchr(ptr, ' '); 2529 if (!ptr) { 2530 pr_err("Invalid line %d, missing end lba\n", num); 2531 ret = -EINVAL; 2532 break; 2533 } 2534 ptr++; 2535 ptr = strchr(ptr, ' '); 2536 if (!ptr) { 2537 pr_err("Invalid line %d, missing state definitions\n", 2538 num); 2539 ret = -EINVAL; 2540 break; 2541 } 2542 ptr++; 2543 lba_map = core_alua_allocate_lba_map(&lba_list, 2544 start_lba, end_lba); 2545 if (IS_ERR(lba_map)) { 2546 ret = PTR_ERR(lba_map); 2547 break; 2548 } 2549 pg = 0; 2550 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) { 2551 switch (state) { 2552 case 'O': 2553 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; 2554 break; 2555 case 'A': 2556 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED; 2557 break; 2558 case 'S': 2559 alua_state = ALUA_ACCESS_STATE_STANDBY; 2560 break; 2561 case 'U': 2562 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE; 2563 break; 2564 default: 2565 pr_err("Invalid ALUA state '%c'\n", state); 2566 ret = -EINVAL; 2567 goto out; 2568 } 2569 2570 ret = core_alua_allocate_lba_map_mem(lba_map, 2571 pg_id, alua_state); 2572 if (ret) { 2573 pr_err("Invalid target descriptor %d:%c " 2574 "at line %d\n", 2575 pg_id, state, num); 2576 break; 2577 } 2578 pg++; 2579 ptr = strchr(ptr, ' '); 2580 if (ptr) 2581 ptr++; 2582 else 2583 break; 2584 } 2585 if (pg_num == -1) 2586 pg_num = pg; 2587 else if (pg != pg_num) { 2588 pr_err("Only %d from %d port groups definitions " 2589 "at line %d\n", pg, pg_num, num); 2590 ret = -EINVAL; 2591 break; 2592 } 2593 num++; 2594 } 2595 out: 2596 if (ret) { 2597 core_alua_free_lba_map(&lba_list); 2598 count = ret; 2599 } else 2600 core_alua_set_lba_map(dev, &lba_list, 2601 segment_size, segment_mult); 2602 kfree(orig); 2603 return count; 2604 } 2605 2606 CONFIGFS_ATTR_RO(target_dev_, info); 2607 CONFIGFS_ATTR_WO(target_dev_, control); 2608 CONFIGFS_ATTR(target_dev_, alias); 2609 CONFIGFS_ATTR(target_dev_, udev_path); 2610 CONFIGFS_ATTR(target_dev_, enable); 2611 CONFIGFS_ATTR(target_dev_, alua_lu_gp); 2612 CONFIGFS_ATTR(target_dev_, lba_map); 2613 2614 static struct configfs_attribute *target_core_dev_attrs[] = { 2615 &target_dev_attr_info, 2616 &target_dev_attr_control, 2617 &target_dev_attr_alias, 2618 &target_dev_attr_udev_path, 2619 &target_dev_attr_enable, 2620 &target_dev_attr_alua_lu_gp, 2621 &target_dev_attr_lba_map, 2622 NULL, 2623 }; 2624 2625 static void target_core_dev_release(struct config_item *item) 2626 { 2627 struct config_group *dev_cg = to_config_group(item); 2628 struct se_device *dev = 2629 container_of(dev_cg, struct se_device, dev_group); 2630 2631 target_free_device(dev); 2632 } 2633 2634 /* 2635 * Used in target_core_fabric_configfs.c to verify valid se_device symlink 2636 * within target_fabric_port_link() 2637 */ 2638 struct configfs_item_operations target_core_dev_item_ops = { 2639 .release = target_core_dev_release, 2640 }; 2641 2642 TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); 2643 2644 /* End functions for struct config_item_type tb_dev_cit */ 2645 2646 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ 2647 2648 static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item) 2649 { 2650 return 
container_of(to_config_group(item), struct t10_alua_lu_gp, 2651 lu_gp_group); 2652 } 2653 2654 static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page) 2655 { 2656 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); 2657 2658 if (!lu_gp->lu_gp_valid_id) 2659 return 0; 2660 return sprintf(page, "%hu\n", lu_gp->lu_gp_id); 2661 } 2662 2663 static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item, 2664 const char *page, size_t count) 2665 { 2666 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); 2667 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; 2668 unsigned long lu_gp_id; 2669 int ret; 2670 2671 ret = kstrtoul(page, 0, &lu_gp_id); 2672 if (ret < 0) { 2673 pr_err("kstrtoul() returned %d for" 2674 " lu_gp_id\n", ret); 2675 return ret; 2676 } 2677 if (lu_gp_id > 0x0000ffff) { 2678 pr_err("ALUA lu_gp_id: %lu exceeds maximum:" 2679 " 0x0000ffff\n", lu_gp_id); 2680 return -EINVAL; 2681 } 2682 2683 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); 2684 if (ret < 0) 2685 return -EINVAL; 2686 2687 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" 2688 " Group: core/alua/lu_gps/%s to ID: %hu\n", 2689 config_item_name(&alua_lu_gp_cg->cg_item), 2690 lu_gp->lu_gp_id); 2691 2692 return count; 2693 } 2694 2695 static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) 2696 { 2697 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item); 2698 struct se_device *dev; 2699 struct se_hba *hba; 2700 struct t10_alua_lu_gp_member *lu_gp_mem; 2701 ssize_t len = 0, cur_len; 2702 unsigned char buf[LU_GROUP_NAME_BUF] = { }; 2703 2704 spin_lock(&lu_gp->lu_gp_lock); 2705 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 2706 dev = lu_gp_mem->lu_gp_mem_dev; 2707 hba = dev->se_hba; 2708 2709 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", 2710 config_item_name(&hba->hba_group.cg_item), 2711 config_item_name(&dev->dev_group.cg_item)); 2712 cur_len++; /* Extra byte for NULL terminator */ 2713 2714 if ((cur_len + len) > PAGE_SIZE) { 2715 pr_warn("Ran out of lu_gp_show_attr" 2716 "_members buffer\n"); 2717 break; 2718 } 2719 memcpy(page+len, buf, cur_len); 2720 len += cur_len; 2721 } 2722 spin_unlock(&lu_gp->lu_gp_lock); 2723 2724 return len; 2725 } 2726 2727 CONFIGFS_ATTR(target_lu_gp_, lu_gp_id); 2728 CONFIGFS_ATTR_RO(target_lu_gp_, members); 2729 2730 static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { 2731 &target_lu_gp_attr_lu_gp_id, 2732 &target_lu_gp_attr_members, 2733 NULL, 2734 }; 2735 2736 static void target_core_alua_lu_gp_release(struct config_item *item) 2737 { 2738 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2739 struct t10_alua_lu_gp, lu_gp_group); 2740 2741 core_alua_free_lu_gp(lu_gp); 2742 } 2743 2744 static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2745 .release = target_core_alua_lu_gp_release, 2746 }; 2747 2748 static const struct config_item_type target_core_alua_lu_gp_cit = { 2749 .ct_item_ops = &target_core_alua_lu_gp_ops, 2750 .ct_attrs = target_core_alua_lu_gp_attrs, 2751 .ct_owner = THIS_MODULE, 2752 }; 2753 2754 /* End functions for struct config_item_type target_core_alua_lu_gp_cit */ 2755 2756 /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ 2757 2758 static struct config_group *target_core_alua_create_lu_gp( 2759 struct config_group *group, 2760 const char *name) 2761 { 2762 struct t10_alua_lu_gp *lu_gp; 2763 struct config_group *alua_lu_gp_cg = NULL; 2764 struct config_item *alua_lu_gp_ci = NULL; 2765 2766 lu_gp = 
core_alua_allocate_lu_gp(name, 0); 2767 if (IS_ERR(lu_gp)) 2768 return NULL; 2769 2770 alua_lu_gp_cg = &lu_gp->lu_gp_group; 2771 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; 2772 2773 config_group_init_type_name(alua_lu_gp_cg, name, 2774 &target_core_alua_lu_gp_cit); 2775 2776 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" 2777 " Group: core/alua/lu_gps/%s\n", 2778 config_item_name(alua_lu_gp_ci)); 2779 2780 return alua_lu_gp_cg; 2781 2782 } 2783 2784 static void target_core_alua_drop_lu_gp( 2785 struct config_group *group, 2786 struct config_item *item) 2787 { 2788 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2789 struct t10_alua_lu_gp, lu_gp_group); 2790 2791 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2792 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2793 config_item_name(item), lu_gp->lu_gp_id); 2794 /* 2795 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() 2796 * -> target_core_alua_lu_gp_release() 2797 */ 2798 config_item_put(item); 2799 } 2800 2801 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2802 .make_group = &target_core_alua_create_lu_gp, 2803 .drop_item = &target_core_alua_drop_lu_gp, 2804 }; 2805 2806 static const struct config_item_type target_core_alua_lu_gps_cit = { 2807 .ct_item_ops = NULL, 2808 .ct_group_ops = &target_core_alua_lu_gps_group_ops, 2809 .ct_owner = THIS_MODULE, 2810 }; 2811 2812 /* End functions for struct config_item_type target_core_alua_lu_gps_cit */ 2813 2814 /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 2815 2816 static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item) 2817 { 2818 return container_of(to_config_group(item), struct t10_alua_tg_pt_gp, 2819 tg_pt_gp_group); 2820 } 2821 2822 static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, 2823 char *page) 2824 { 2825 return sprintf(page, "%d\n", 2826 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); 2827 } 2828 2829 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, 2830 const char *page, size_t count) 2831 { 2832 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2833 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 2834 unsigned long tmp; 2835 int new_state, ret; 2836 2837 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2838 pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n"); 2839 return -EINVAL; 2840 } 2841 if (!target_dev_configured(dev)) { 2842 pr_err("Unable to set alua_access_state while device is" 2843 " not configured\n"); 2844 return -ENODEV; 2845 } 2846 2847 ret = kstrtoul(page, 0, &tmp); 2848 if (ret < 0) { 2849 pr_err("Unable to extract new ALUA access state from" 2850 " %s\n", page); 2851 return ret; 2852 } 2853 new_state = (int)tmp; 2854 2855 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { 2856 pr_err("Unable to process implicit configfs ALUA" 2857 " transition while TPGS_IMPLICIT_ALUA is disabled\n"); 2858 return -EINVAL; 2859 } 2860 if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA && 2861 new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) { 2862 /* LBA DEPENDENT is only allowed with implicit ALUA */ 2863 pr_err("Unable to process implicit configfs ALUA transition" 2864 " while explicit ALUA management is enabled\n"); 2865 return -EINVAL; 2866 } 2867 2868 ret = core_alua_do_port_transition(tg_pt_gp, dev, 2869 NULL, NULL, new_state, 0); 2870 return (!ret) ? 
count : -EINVAL; 2871 } 2872 2873 static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item, 2874 char *page) 2875 { 2876 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2877 return sprintf(page, "%s\n", 2878 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); 2879 } 2880 2881 static ssize_t target_tg_pt_gp_alua_access_status_store( 2882 struct config_item *item, const char *page, size_t count) 2883 { 2884 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2885 unsigned long tmp; 2886 int new_status, ret; 2887 2888 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2889 pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n"); 2890 return -EINVAL; 2891 } 2892 2893 ret = kstrtoul(page, 0, &tmp); 2894 if (ret < 0) { 2895 pr_err("Unable to extract new ALUA access status" 2896 " from %s\n", page); 2897 return ret; 2898 } 2899 new_status = (int)tmp; 2900 2901 if ((new_status != ALUA_STATUS_NONE) && 2902 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 2903 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { 2904 pr_err("Illegal ALUA access status: 0x%02x\n", 2905 new_status); 2906 return -EINVAL; 2907 } 2908 2909 tg_pt_gp->tg_pt_gp_alua_access_status = new_status; 2910 return count; 2911 } 2912 2913 static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item, 2914 char *page) 2915 { 2916 return core_alua_show_access_type(to_tg_pt_gp(item), page); 2917 } 2918 2919 static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item, 2920 const char *page, size_t count) 2921 { 2922 return core_alua_store_access_type(to_tg_pt_gp(item), page, count); 2923 } 2924 2925 #define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \ 2926 static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \ 2927 struct config_item *item, char *p) \ 2928 { \ 2929 struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ 2930 return sprintf(p, "%d\n", \ 2931 !!(t->tg_pt_gp_alua_supported_states & _bit)); \ 2932 } \ 2933 \ 2934 static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \ 2935 struct config_item *item, const char *p, size_t c) \ 2936 { \ 2937 struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \ 2938 unsigned long tmp; \ 2939 int ret; \ 2940 \ 2941 if (!t->tg_pt_gp_valid_id) { \ 2942 pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \ 2943 return -EINVAL; \ 2944 } \ 2945 \ 2946 ret = kstrtoul(p, 0, &tmp); \ 2947 if (ret < 0) { \ 2948 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ 2949 return -EINVAL; \ 2950 } \ 2951 if (tmp > 1) { \ 2952 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ 2953 return -EINVAL; \ 2954 } \ 2955 if (tmp) \ 2956 t->tg_pt_gp_alua_supported_states |= _bit; \ 2957 else \ 2958 t->tg_pt_gp_alua_supported_states &= ~_bit; \ 2959 \ 2960 return c; \ 2961 } 2962 2963 ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP); 2964 ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP); 2965 ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP); 2966 ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP); 2967 ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP); 2968 ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP); 2969 ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP); 2970 2971 static ssize_t target_tg_pt_gp_alua_write_metadata_show( 2972 struct config_item *item, char *page) 2973 { 2974 return sprintf(page, "%d\n", 2975 to_tg_pt_gp(item)->tg_pt_gp_write_metadata); 2976 } 2977 2978 static ssize_t target_tg_pt_gp_alua_write_metadata_store( 2979 struct config_item *item, const char 
*page, size_t count) 2980 { 2981 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 2982 unsigned long tmp; 2983 int ret; 2984 2985 ret = kstrtoul(page, 0, &tmp); 2986 if (ret < 0) { 2987 pr_err("Unable to extract alua_write_metadata\n"); 2988 return ret; 2989 } 2990 2991 if ((tmp != 0) && (tmp != 1)) { 2992 pr_err("Illegal value for alua_write_metadata:" 2993 " %lu\n", tmp); 2994 return -EINVAL; 2995 } 2996 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; 2997 2998 return count; 2999 } 3000 3001 static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item, 3002 char *page) 3003 { 3004 return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page); 3005 } 3006 3007 static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item, 3008 const char *page, size_t count) 3009 { 3010 return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page, 3011 count); 3012 } 3013 3014 static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item, 3015 char *page) 3016 { 3017 return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page); 3018 } 3019 3020 static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item, 3021 const char *page, size_t count) 3022 { 3023 return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page, 3024 count); 3025 } 3026 3027 static ssize_t target_tg_pt_gp_implicit_trans_secs_show( 3028 struct config_item *item, char *page) 3029 { 3030 return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page); 3031 } 3032 3033 static ssize_t target_tg_pt_gp_implicit_trans_secs_store( 3034 struct config_item *item, const char *page, size_t count) 3035 { 3036 return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page, 3037 count); 3038 } 3039 3040 static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item, 3041 char *page) 3042 { 3043 return core_alua_show_preferred_bit(to_tg_pt_gp(item), page); 3044 } 3045 3046 static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item, 3047 const char *page, size_t count) 3048 { 3049 return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count); 3050 } 3051 3052 static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item, 3053 char *page) 3054 { 3055 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 3056 3057 if (!tg_pt_gp->tg_pt_gp_valid_id) 3058 return 0; 3059 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); 3060 } 3061 3062 static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item, 3063 const char *page, size_t count) 3064 { 3065 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); 3066 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; 3067 unsigned long tg_pt_gp_id; 3068 int ret; 3069 3070 ret = kstrtoul(page, 0, &tg_pt_gp_id); 3071 if (ret < 0) { 3072 pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n", 3073 page); 3074 return ret; 3075 } 3076 if (tg_pt_gp_id > 0x0000ffff) { 3077 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n", 3078 tg_pt_gp_id); 3079 return -EINVAL; 3080 } 3081 3082 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); 3083 if (ret < 0) 3084 return -EINVAL; 3085 3086 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " 3087 "core/alua/tg_pt_gps/%s to ID: %hu\n", 3088 config_item_name(&alua_tg_pt_gp_cg->cg_item), 3089 tg_pt_gp->tg_pt_gp_id); 3090 3091 return count; 3092 } 3093 3094 static ssize_t target_tg_pt_gp_members_show(struct config_item *item, 3095 char *page) 3096 { 3097 struct t10_alua_tg_pt_gp 
*tg_pt_gp = to_tg_pt_gp(item); 3098 struct se_lun *lun; 3099 ssize_t len = 0, cur_len; 3100 unsigned char buf[TG_PT_GROUP_NAME_BUF] = { }; 3101 3102 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 3103 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 3104 lun_tg_pt_gp_link) { 3105 struct se_portal_group *tpg = lun->lun_tpg; 3106 3107 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" 3108 "/%s\n", tpg->se_tpg_tfo->fabric_name, 3109 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 3110 tpg->se_tpg_tfo->tpg_get_tag(tpg), 3111 config_item_name(&lun->lun_group.cg_item)); 3112 cur_len++; /* Extra byte for NULL terminator */ 3113 3114 if ((cur_len + len) > PAGE_SIZE) { 3115 pr_warn("Ran out of lu_gp_show_attr" 3116 "_members buffer\n"); 3117 break; 3118 } 3119 memcpy(page+len, buf, cur_len); 3120 len += cur_len; 3121 } 3122 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 3123 3124 return len; 3125 } 3126 3127 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state); 3128 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status); 3129 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type); 3130 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning); 3131 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline); 3132 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent); 3133 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable); 3134 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby); 3135 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized); 3136 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized); 3137 CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata); 3138 CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs); 3139 CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs); 3140 CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs); 3141 CONFIGFS_ATTR(target_tg_pt_gp_, preferred); 3142 CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id); 3143 CONFIGFS_ATTR_RO(target_tg_pt_gp_, members); 3144 3145 static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { 3146 &target_tg_pt_gp_attr_alua_access_state, 3147 &target_tg_pt_gp_attr_alua_access_status, 3148 &target_tg_pt_gp_attr_alua_access_type, 3149 &target_tg_pt_gp_attr_alua_support_transitioning, 3150 &target_tg_pt_gp_attr_alua_support_offline, 3151 &target_tg_pt_gp_attr_alua_support_lba_dependent, 3152 &target_tg_pt_gp_attr_alua_support_unavailable, 3153 &target_tg_pt_gp_attr_alua_support_standby, 3154 &target_tg_pt_gp_attr_alua_support_active_nonoptimized, 3155 &target_tg_pt_gp_attr_alua_support_active_optimized, 3156 &target_tg_pt_gp_attr_alua_write_metadata, 3157 &target_tg_pt_gp_attr_nonop_delay_msecs, 3158 &target_tg_pt_gp_attr_trans_delay_msecs, 3159 &target_tg_pt_gp_attr_implicit_trans_secs, 3160 &target_tg_pt_gp_attr_preferred, 3161 &target_tg_pt_gp_attr_tg_pt_gp_id, 3162 &target_tg_pt_gp_attr_members, 3163 NULL, 3164 }; 3165 3166 static void target_core_alua_tg_pt_gp_release(struct config_item *item) 3167 { 3168 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 3169 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 3170 3171 core_alua_free_tg_pt_gp(tg_pt_gp); 3172 } 3173 3174 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 3175 .release = target_core_alua_tg_pt_gp_release, 3176 }; 3177 3178 static const struct config_item_type target_core_alua_tg_pt_gp_cit = { 3179 .ct_item_ops = &target_core_alua_tg_pt_gp_ops, 3180 .ct_attrs = target_core_alua_tg_pt_gp_attrs, 3181 .ct_owner = THIS_MODULE, 3182 }; 3183 3184 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 3185 3186 
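/*
 * Illustrative example (not part of the original source): target port groups
 * of the type defined above are created with mkdir(2) under the per-device
 * "alua" group set up in target_core_make_subdev() below, and then configured
 * through the attributes registered above, e.g.
 *
 *   mkdir /sys/kernel/config/target/core/$HBA/$DEV/alua/tpg_example
 *   echo 16 > /sys/kernel/config/target/core/$HBA/$DEV/alua/tpg_example/tg_pt_gp_id
 *   echo 0 > /sys/kernel/config/target/core/$HBA/$DEV/alua/tpg_example/alua_access_state
 *
 * $HBA, $DEV and "tpg_example" are placeholders; a valid tg_pt_gp_id must be
 * set before stores such as alua_access_state, which check
 * tg_pt_gp_valid_id, will accept a value.
 */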
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}

static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group = &target_core_alua_create_tg_pt_gp,
	.drop_item = &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */

/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. There are default groups
 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
 * target_core_alua_cit in target_core_init_configfs() below.
3248 */ 3249 static const struct config_item_type target_core_alua_cit = { 3250 .ct_item_ops = NULL, 3251 .ct_attrs = NULL, 3252 .ct_owner = THIS_MODULE, 3253 }; 3254 3255 /* End functions for struct config_item_type target_core_alua_cit */ 3256 3257 /* Start functions for struct config_item_type tb_dev_stat_cit */ 3258 3259 static struct config_group *target_core_stat_mkdir( 3260 struct config_group *group, 3261 const char *name) 3262 { 3263 return ERR_PTR(-ENOSYS); 3264 } 3265 3266 static void target_core_stat_rmdir( 3267 struct config_group *group, 3268 struct config_item *item) 3269 { 3270 return; 3271 } 3272 3273 static struct configfs_group_operations target_core_stat_group_ops = { 3274 .make_group = &target_core_stat_mkdir, 3275 .drop_item = &target_core_stat_rmdir, 3276 }; 3277 3278 TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL); 3279 3280 /* End functions for struct config_item_type tb_dev_stat_cit */ 3281 3282 /* Start functions for struct config_item_type target_core_hba_cit */ 3283 3284 static struct config_group *target_core_make_subdev( 3285 struct config_group *group, 3286 const char *name) 3287 { 3288 struct t10_alua_tg_pt_gp *tg_pt_gp; 3289 struct config_item *hba_ci = &group->cg_item; 3290 struct se_hba *hba = item_to_hba(hba_ci); 3291 struct target_backend *tb = hba->backend; 3292 struct se_device *dev; 3293 int errno = -ENOMEM, ret; 3294 3295 ret = mutex_lock_interruptible(&hba->hba_access_mutex); 3296 if (ret) 3297 return ERR_PTR(ret); 3298 3299 dev = target_alloc_device(hba, name); 3300 if (!dev) 3301 goto out_unlock; 3302 3303 config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit); 3304 3305 config_group_init_type_name(&dev->dev_action_group, "action", 3306 &tb->tb_dev_action_cit); 3307 configfs_add_default_group(&dev->dev_action_group, &dev->dev_group); 3308 3309 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", 3310 &tb->tb_dev_attrib_cit); 3311 configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group); 3312 3313 config_group_init_type_name(&dev->dev_pr_group, "pr", 3314 &tb->tb_dev_pr_cit); 3315 configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group); 3316 3317 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", 3318 &tb->tb_dev_wwn_cit); 3319 configfs_add_default_group(&dev->t10_wwn.t10_wwn_group, 3320 &dev->dev_group); 3321 3322 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, 3323 "alua", &tb->tb_dev_alua_tg_pt_gps_cit); 3324 configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group, 3325 &dev->dev_group); 3326 3327 config_group_init_type_name(&dev->dev_stat_grps.stat_group, 3328 "statistics", &tb->tb_dev_stat_cit); 3329 configfs_add_default_group(&dev->dev_stat_grps.stat_group, 3330 &dev->dev_group); 3331 3332 /* 3333 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 3334 */ 3335 tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1); 3336 if (!tg_pt_gp) 3337 goto out_free_device; 3338 dev->t10_alua.default_tg_pt_gp = tg_pt_gp; 3339 3340 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, 3341 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); 3342 configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group, 3343 &dev->t10_alua.alua_tg_pt_gps_group); 3344 3345 /* 3346 * Add core/$HBA/$DEV/statistics/ default groups 3347 */ 3348 target_stat_setup_dev_default_groups(dev); 3349 3350 mutex_lock(&target_devices_lock); 3351 target_devices++; 3352 mutex_unlock(&target_devices_lock); 3353 3354 mutex_unlock(&hba->hba_access_mutex); 3355 return &dev->dev_group; 3356 
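	/*
	 * Error unwind: target_free_device() below releases the se_device
	 * allocated above, and hba_access_mutex is dropped on both paths.
	 */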
3357 out_free_device: 3358 target_free_device(dev); 3359 out_unlock: 3360 mutex_unlock(&hba->hba_access_mutex); 3361 return ERR_PTR(errno); 3362 } 3363 3364 static void target_core_drop_subdev( 3365 struct config_group *group, 3366 struct config_item *item) 3367 { 3368 struct config_group *dev_cg = to_config_group(item); 3369 struct se_device *dev = 3370 container_of(dev_cg, struct se_device, dev_group); 3371 struct se_hba *hba; 3372 3373 hba = item_to_hba(&dev->se_hba->hba_group.cg_item); 3374 3375 mutex_lock(&hba->hba_access_mutex); 3376 3377 configfs_remove_default_groups(&dev->dev_stat_grps.stat_group); 3378 configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group); 3379 3380 /* 3381 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp 3382 * directly from target_core_alua_tg_pt_gp_release(). 3383 */ 3384 dev->t10_alua.default_tg_pt_gp = NULL; 3385 3386 configfs_remove_default_groups(dev_cg); 3387 3388 /* 3389 * se_dev is released from target_core_dev_item_ops->release() 3390 */ 3391 config_item_put(item); 3392 3393 mutex_lock(&target_devices_lock); 3394 target_devices--; 3395 mutex_unlock(&target_devices_lock); 3396 3397 mutex_unlock(&hba->hba_access_mutex); 3398 } 3399 3400 static struct configfs_group_operations target_core_hba_group_ops = { 3401 .make_group = target_core_make_subdev, 3402 .drop_item = target_core_drop_subdev, 3403 }; 3404 3405 3406 static inline struct se_hba *to_hba(struct config_item *item) 3407 { 3408 return container_of(to_config_group(item), struct se_hba, hba_group); 3409 } 3410 3411 static ssize_t target_hba_info_show(struct config_item *item, char *page) 3412 { 3413 struct se_hba *hba = to_hba(item); 3414 3415 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", 3416 hba->hba_id, hba->backend->ops->name, 3417 TARGET_CORE_VERSION); 3418 } 3419 3420 static ssize_t target_hba_mode_show(struct config_item *item, char *page) 3421 { 3422 struct se_hba *hba = to_hba(item); 3423 int hba_mode = 0; 3424 3425 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) 3426 hba_mode = 1; 3427 3428 return sprintf(page, "%d\n", hba_mode); 3429 } 3430 3431 static ssize_t target_hba_mode_store(struct config_item *item, 3432 const char *page, size_t count) 3433 { 3434 struct se_hba *hba = to_hba(item); 3435 unsigned long mode_flag; 3436 int ret; 3437 3438 if (hba->backend->ops->pmode_enable_hba == NULL) 3439 return -EINVAL; 3440 3441 ret = kstrtoul(page, 0, &mode_flag); 3442 if (ret < 0) { 3443 pr_err("Unable to extract hba mode flag: %d\n", ret); 3444 return ret; 3445 } 3446 3447 if (hba->dev_count) { 3448 pr_err("Unable to set hba_mode with active devices\n"); 3449 return -EINVAL; 3450 } 3451 3452 ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag); 3453 if (ret < 0) 3454 return -EINVAL; 3455 if (ret > 0) 3456 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; 3457 else if (ret == 0) 3458 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 3459 3460 return count; 3461 } 3462 3463 CONFIGFS_ATTR_RO(target_, hba_info); 3464 CONFIGFS_ATTR(target_, hba_mode); 3465 3466 static void target_core_hba_release(struct config_item *item) 3467 { 3468 struct se_hba *hba = container_of(to_config_group(item), 3469 struct se_hba, hba_group); 3470 core_delete_hba(hba); 3471 } 3472 3473 static struct configfs_attribute *target_core_hba_attrs[] = { 3474 &target_attr_hba_info, 3475 &target_attr_hba_mode, 3476 NULL, 3477 }; 3478 3479 static struct configfs_item_operations target_core_hba_item_ops = { 3480 .release = target_core_hba_release, 3481 }; 3482 3483 static const struct config_item_type 
target_core_hba_cit = { 3484 .ct_item_ops = &target_core_hba_item_ops, 3485 .ct_group_ops = &target_core_hba_group_ops, 3486 .ct_attrs = target_core_hba_attrs, 3487 .ct_owner = THIS_MODULE, 3488 }; 3489 3490 static struct config_group *target_core_call_addhbatotarget( 3491 struct config_group *group, 3492 const char *name) 3493 { 3494 char *se_plugin_str, *str, *str2; 3495 struct se_hba *hba; 3496 char buf[TARGET_CORE_NAME_MAX_LEN] = { }; 3497 unsigned long plugin_dep_id = 0; 3498 int ret; 3499 3500 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { 3501 pr_err("Passed *name strlen(): %d exceeds" 3502 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3503 TARGET_CORE_NAME_MAX_LEN); 3504 return ERR_PTR(-ENAMETOOLONG); 3505 } 3506 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); 3507 3508 str = strstr(buf, "_"); 3509 if (!str) { 3510 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); 3511 return ERR_PTR(-EINVAL); 3512 } 3513 se_plugin_str = buf; 3514 /* 3515 * Special case for subsystem plugins that have "_" in their names. 3516 * Namely rd_direct and rd_mcp.. 3517 */ 3518 str2 = strstr(str+1, "_"); 3519 if (str2) { 3520 *str2 = '\0'; /* Terminate for *se_plugin_str */ 3521 str2++; /* Skip to start of plugin dependent ID */ 3522 str = str2; 3523 } else { 3524 *str = '\0'; /* Terminate for *se_plugin_str */ 3525 str++; /* Skip to start of plugin dependent ID */ 3526 } 3527 3528 ret = kstrtoul(str, 0, &plugin_dep_id); 3529 if (ret < 0) { 3530 pr_err("kstrtoul() returned %d for" 3531 " plugin_dep_id\n", ret); 3532 return ERR_PTR(ret); 3533 } 3534 /* 3535 * Load up TCM subsystem plugins if they have not already been loaded. 3536 */ 3537 transport_subsystem_check_init(); 3538 3539 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); 3540 if (IS_ERR(hba)) 3541 return ERR_CAST(hba); 3542 3543 config_group_init_type_name(&hba->hba_group, name, 3544 &target_core_hba_cit); 3545 3546 return &hba->hba_group; 3547 } 3548 3549 static void target_core_call_delhbafromtarget( 3550 struct config_group *group, 3551 struct config_item *item) 3552 { 3553 /* 3554 * core_delete_hba() is called from target_core_hba_item_ops->release() 3555 * -> target_core_hba_release() 3556 */ 3557 config_item_put(item); 3558 } 3559 3560 static struct configfs_group_operations target_core_group_ops = { 3561 .make_group = target_core_call_addhbatotarget, 3562 .drop_item = target_core_call_delhbafromtarget, 3563 }; 3564 3565 static const struct config_item_type target_core_cit = { 3566 .ct_item_ops = NULL, 3567 .ct_group_ops = &target_core_group_ops, 3568 .ct_attrs = NULL, 3569 .ct_owner = THIS_MODULE, 3570 }; 3571 3572 /* Stop functions for struct config_item_type target_core_hba_cit */ 3573 3574 void target_setup_backend_cits(struct target_backend *tb) 3575 { 3576 target_core_setup_dev_cit(tb); 3577 target_core_setup_dev_action_cit(tb); 3578 target_core_setup_dev_attrib_cit(tb); 3579 target_core_setup_dev_pr_cit(tb); 3580 target_core_setup_dev_wwn_cit(tb); 3581 target_core_setup_dev_alua_tg_pt_gps_cit(tb); 3582 target_core_setup_dev_stat_cit(tb); 3583 } 3584 3585 static void target_init_dbroot(void) 3586 { 3587 struct file *fp; 3588 3589 snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED); 3590 fp = filp_open(db_root_stage, O_RDONLY, 0); 3591 if (IS_ERR(fp)) { 3592 pr_err("db_root: cannot open: %s\n", db_root_stage); 3593 return; 3594 } 3595 if (!S_ISDIR(file_inode(fp)->i_mode)) { 3596 filp_close(fp, NULL); 3597 pr_err("db_root: not a valid directory: %s\n", db_root_stage); 3598 return; 3599 } 3600 
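	/*
	 * DB_ROOT_PREFERRED exists and is a directory; make it the runtime
	 * db_root used for target metadata (e.g. PR APTPL files).
	 */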
filp_close(fp, NULL); 3601 3602 strncpy(db_root, db_root_stage, DB_ROOT_LEN); 3603 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root); 3604 } 3605 3606 static int __init target_core_init_configfs(void) 3607 { 3608 struct configfs_subsystem *subsys = &target_core_fabrics; 3609 struct t10_alua_lu_gp *lu_gp; 3610 int ret; 3611 3612 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" 3613 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 3614 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 3615 3616 config_group_init(&subsys->su_group); 3617 mutex_init(&subsys->su_mutex); 3618 3619 ret = init_se_kmem_caches(); 3620 if (ret < 0) 3621 return ret; 3622 /* 3623 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object 3624 * and ALUA Logical Unit Group and Target Port Group infrastructure. 3625 */ 3626 config_group_init_type_name(&target_core_hbagroup, "core", 3627 &target_core_cit); 3628 configfs_add_default_group(&target_core_hbagroup, &subsys->su_group); 3629 3630 /* 3631 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ 3632 */ 3633 config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit); 3634 configfs_add_default_group(&alua_group, &target_core_hbagroup); 3635 3636 /* 3637 * Add ALUA Logical Unit Group and Target Port Group ConfigFS 3638 * groups under /sys/kernel/config/target/core/alua/ 3639 */ 3640 config_group_init_type_name(&alua_lu_gps_group, "lu_gps", 3641 &target_core_alua_lu_gps_cit); 3642 configfs_add_default_group(&alua_lu_gps_group, &alua_group); 3643 3644 /* 3645 * Add core/alua/lu_gps/default_lu_gp 3646 */ 3647 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); 3648 if (IS_ERR(lu_gp)) { 3649 ret = -ENOMEM; 3650 goto out_global; 3651 } 3652 3653 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", 3654 &target_core_alua_lu_gp_cit); 3655 configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group); 3656 3657 default_lu_gp = lu_gp; 3658 3659 /* 3660 * Register the target_core_mod subsystem with configfs. 3661 */ 3662 ret = configfs_register_subsystem(subsys); 3663 if (ret < 0) { 3664 pr_err("Error %d while registering subsystem %s\n", 3665 ret, subsys->su_group.cg_item.ci_namebuf); 3666 goto out_global; 3667 } 3668 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" 3669 " Infrastructure: "TARGET_CORE_VERSION" on %s/%s" 3670 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); 3671 /* 3672 * Register built-in RAMDISK subsystem logic for virtual LUN 0 3673 */ 3674 ret = rd_module_init(); 3675 if (ret < 0) 3676 goto out; 3677 3678 ret = core_dev_setup_virtual_lun0(); 3679 if (ret < 0) 3680 goto out; 3681 3682 ret = target_xcopy_setup_pt(); 3683 if (ret < 0) 3684 goto out; 3685 3686 target_init_dbroot(); 3687 3688 return 0; 3689 3690 out: 3691 configfs_unregister_subsystem(subsys); 3692 core_dev_release_virtual_lun0(); 3693 rd_module_exit(); 3694 out_global: 3695 if (default_lu_gp) { 3696 core_alua_free_lu_gp(default_lu_gp); 3697 default_lu_gp = NULL; 3698 } 3699 release_se_kmem_caches(); 3700 return ret; 3701 } 3702 3703 static void __exit target_core_exit_configfs(void) 3704 { 3705 configfs_remove_default_groups(&alua_lu_gps_group); 3706 configfs_remove_default_groups(&alua_group); 3707 configfs_remove_default_groups(&target_core_hbagroup); 3708 3709 /* 3710 * We expect subsys->su_group.default_groups to be released 3711 * by configfs subsystem provider logic.. 
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
		" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}

MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);
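/*
 * Illustrative end-to-end sketch (not part of the original source) of the
 * core configfs interface built by this file, assuming configfs is mounted
 * at /sys/kernel/config and the rd_mcp ramdisk backend is available:
 *
 *   mkdir /sys/kernel/config/target/core/rd_mcp_0
 *   mkdir /sys/kernel/config/target/core/rd_mcp_0/dev0
 *   echo rd_pages=65536 > /sys/kernel/config/target/core/rd_mcp_0/dev0/control
 *   echo 1 > /sys/kernel/config/target/core/rd_mcp_0/dev0/enable
 *   cat /sys/kernel/config/target/core/rd_mcp_0/dev0/info
 *
 * "rd_mcp_0", "dev0" and the rd_pages= value are hypothetical; backend
 * parameters written to the control attribute are handled by the backend's
 * set_configfs_dev_params(), not by this file.
 */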