// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;
static DEFINE_XARRAY_ALLOC(tpg_xa);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
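
/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * fabric module that must look up an existing NodeACL at login time
 * would call the exported helper above roughly as follows.  The
 * "login_name" variable is a hypothetical placeholder for the
 * fabric-provided initiator name (unsigned char *):
 *
 *	struct se_node_acl *acl;
 *
 *	acl = core_tpg_get_initiator_node_acl(se_tpg, login_name);
 *	if (!acl)
 *		return -EACCES;	// no matching ACL; reject the login
 *
 *	// acl->acl_kref is now held; it is dropped later when the
 *	// fabric invokes transport_deregister_session().
 */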

/*	core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name,
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
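
/*
 * Example (illustrative sketch): fabric drivers typically reach
 * core_tpg_check_initiator_node_acl() indirectly through session setup
 * helpers such as target_setup_session(), but an open-coded session
 * setup would look roughly like the following; "sess", "se_tpg" and
 * "initiatorname" are hypothetical caller-side variables:
 *
 *	sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
 *					(unsigned char *)initiatorname);
 *	if (!sess->se_node_acl)
 *		return -EPERM;	// no explicit ACL and demo mode disabled
 */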
"DYNAMIC" : "", 218 acl->queue_depth, 219 tpg->se_tpg_tfo->fabric_name, 220 acl->initiatorname); 221 } 222 223 bool target_tpg_has_node_acl(struct se_portal_group *tpg, 224 const char *initiatorname) 225 { 226 struct se_node_acl *acl; 227 bool found = false; 228 229 mutex_lock(&tpg->acl_node_mutex); 230 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 231 if (!strcmp(acl->initiatorname, initiatorname)) { 232 found = true; 233 break; 234 } 235 } 236 mutex_unlock(&tpg->acl_node_mutex); 237 238 return found; 239 } 240 EXPORT_SYMBOL(target_tpg_has_node_acl); 241 242 struct se_node_acl *core_tpg_check_initiator_node_acl( 243 struct se_portal_group *tpg, 244 unsigned char *initiatorname) 245 { 246 struct se_node_acl *acl; 247 248 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 249 if (acl) 250 return acl; 251 252 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) 253 return NULL; 254 255 acl = target_alloc_node_acl(tpg, initiatorname); 256 if (!acl) 257 return NULL; 258 /* 259 * When allocating a dynamically generated node_acl, go ahead 260 * and take the extra kref now before returning to the fabric 261 * driver caller. 262 * 263 * Note this reference will be released at session shutdown 264 * time within transport_free_session() code. 265 */ 266 kref_get(&acl->acl_kref); 267 acl->dynamic_node_acl = 1; 268 269 /* 270 * Here we only create demo-mode MappedLUNs from the active 271 * TPG LUNs if the fabric is not explicitly asking for 272 * tpg_check_demo_mode_login_only() == 1. 273 */ 274 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || 275 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) 276 core_tpg_add_node_to_devs(acl, tpg, NULL); 277 278 target_add_node_acl(acl); 279 return acl; 280 } 281 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); 282 283 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) 284 { 285 while (atomic_read(&nacl->acl_pr_ref_count) != 0) 286 cpu_relax(); 287 } 288 289 struct se_node_acl *core_tpg_add_initiator_node_acl( 290 struct se_portal_group *tpg, 291 const char *initiatorname) 292 { 293 struct se_node_acl *acl; 294 295 mutex_lock(&tpg->acl_node_mutex); 296 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 297 if (acl) { 298 if (acl->dynamic_node_acl) { 299 acl->dynamic_node_acl = 0; 300 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 301 " for %s\n", tpg->se_tpg_tfo->fabric_name, 302 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 303 mutex_unlock(&tpg->acl_node_mutex); 304 return acl; 305 } 306 307 pr_err("ACL entry for %s Initiator" 308 " Node %s already exists for TPG %u, ignoring" 309 " request.\n", tpg->se_tpg_tfo->fabric_name, 310 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 311 mutex_unlock(&tpg->acl_node_mutex); 312 return ERR_PTR(-EEXIST); 313 } 314 mutex_unlock(&tpg->acl_node_mutex); 315 316 acl = target_alloc_node_acl(tpg, initiatorname); 317 if (!acl) 318 return ERR_PTR(-ENOMEM); 319 320 target_add_node_acl(acl); 321 return acl; 322 } 323 324 static void target_shutdown_sessions(struct se_node_acl *acl) 325 { 326 struct se_session *sess; 327 unsigned long flags; 328 329 restart: 330 spin_lock_irqsave(&acl->nacl_sess_lock, flags); 331 list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) { 332 if (atomic_read(&sess->stopped)) 333 continue; 334 335 list_del_init(&sess->sess_acl_list); 336 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); 337 338 if (acl->se_tpg->se_tpg_tfo->close_session) 339 acl->se_tpg->se_tpg_tfo->close_session(sess); 340 goto restart; 341 } 342 

static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (atomic_read(&sess->stopped))
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del_init(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
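
/*
 * Example (sketch, assuming a fabric exposing a "cmdsn_depth"-style
 * configfs attribute for the NodeACL): a store handler would parse the
 * user-supplied value and delegate here, accepting that any active
 * sessions are torn down for reinstatement.  "page", "count" and "acl"
 * are the usual configfs store-handler arguments:
 *
 *	u32 depth;
 *	int ret;
 *
 *	ret = kstrtou32(page, 0, &depth);
 *	if (ret < 0)
 *		return ret;
 *	ret = core_tpg_set_initiator_node_queue_depth(acl, depth);
 *	return ret ? ret : count;
 */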

/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
	u32 val;
	int ret;

	ret = xa_alloc(&tpg_xa, &val, se_tpg,
		       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
	if (!ret)
		se_tpg->tpg_rtpi = val;

	return ret;
}

static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
{
	if (se_tpg->tpg_rtpi && se_tpg->enabled)
		xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
}

int target_tpg_enable(struct se_portal_group *se_tpg)
{
	int ret;

	ret = target_tpg_register_rtpi(se_tpg);
	if (ret)
		return ret;

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
	if (ret) {
		target_tpg_deregister_rtpi(se_tpg);
		return ret;
	}

	se_tpg->enabled = true;

	return 0;
}

int target_tpg_disable(struct se_portal_group *se_tpg)
{
	int ret;

	target_tpg_deregister_rtpi(se_tpg);

	ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
	if (!ret)
		se_tpg->enabled = false;

	return ret;
}
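
/*
 * Example: these helpers back the per-TPG "enable" configfs attribute,
 * e.g. "echo 1 > .../tpgt_1/enable".  A store handler (sketch; "page"
 * is the configfs buffer, "se_tpg" the owning portal group) would do:
 *
 *	bool op;
 *	int ret;
 *
 *	if (kstrtobool(page, &op) < 0)
 *		return -EINVAL;
 *	ret = op ? target_tpg_enable(se_tpg) : target_tpg_disable(se_tpg);
 *	if (ret)
 *		return ret;
 */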

/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				       true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del_init(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	target_tpg_deregister_rtpi(se_tpg);

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
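
/*
 * Example (sketch of a fabric's ->fabric_make_tpg() callback, using a
 * hypothetical "struct foo_tpg" container; error paths trimmed):
 *
 *	struct foo_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *	int ret;
 *
 *	if (!tpg)
 *		return ERR_PTR(-ENOMEM);
 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0) {
 *		kfree(tpg);
 *		return ERR_PTR(ret);
 *	}
 *	return &tpg->se_tpg;
 *
 * The matching ->fabric_drop_tpg() then calls core_tpg_deregister()
 * before freeing the container.
 */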

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}
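
/*
 * Example: the expected LUN lifecycle pairs core_tpg_alloc_lun() and
 * core_tpg_add_lun() on export with core_tpg_remove_lun() on teardown,
 * as core_tpg_register() above does for the virtual LUN 0 (sketch;
 * error handling elided, "dev" and "unpacked_lun" supplied by the
 * caller):
 *
 *	struct se_lun *lun;
 *	int ret;
 *
 *	lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	ret = core_tpg_add_lun(se_tpg, lun, false, dev);  // false == R/W
 *	...
 *	core_tpg_remove_lun(se_tpg, lun);
 */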