/*******************************************************************************
 * Filename: target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* __core_tpg_get_initiator_node_acl():
 *
 * mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;
        /*
         * Obtain se_node_acl->acl_kref using fabric driver provided
         * initiatorname[] during node acl endpoint lookup driven by
         * new se_session login.
         *
         * The reference is held until se_session shutdown -> release
         * occurs via fabric driver invoked transport_deregister_session()
         * or transport_free_session() code.
         */
        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (!kref_get_unless_zero(&acl->acl_kref))
                        acl = NULL;
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

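/*
 * Post a NEXUS LOSS OCCURRED unit attention (ASC 0x29) on every LUN
 * mapping of @nacl, so the initiator learns on a subsequent command
 * that its I_T nexus was lost.  A NULL @nacl is silently ignored.
 */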
void core_allocate_nexus_loss_ua(
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        if (!nacl)
                return;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                core_scsi3_ua_allocate(deve, 0x29,
                        ASCQ_29H_NEXUS_LOSS_OCCURRED);
        rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/* core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg,
        struct se_lun *lun_orig)
{
        bool lun_access_ro = true;
        struct se_lun *lun;
        struct se_device *dev;

        mutex_lock(&tpg->tpg_lun_mutex);
        hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
                if (lun_orig && lun != lun_orig)
                        continue;

                dev = rcu_dereference_check(lun->lun_se_dev,
                                lockdep_is_held(&tpg->tpg_lun_mutex));
                /*
                 * By default in LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is ON, or READ_ONLY;
                 */
                if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
                        lun_access_ro = false;
                } else {
                        /*
                         * Allow only optical drives to issue R/W in default RO
                         * demo mode.
                         */
                        if (dev->transport->get_device_type(dev) == TYPE_DISK)
                                lun_access_ro = true;
                        else
                                lun_access_ro = false;
                }

                pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        tpg->se_tpg_tfo->get_fabric_name(),
                        tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                        lun_access_ro ? "READ-ONLY" : "READ-WRITE");

                core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                                 lun_access_ro, acl, tpg);
                /*
                 * Check to see if there are any existing persistent reservation
                 * APTPL pre-registrations that need to be enabled for this dynamic
                 * LUN ACL now..
                 */
                core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                                                    lun->unpacked_lun);
        }
        mutex_unlock(&tpg->tpg_lun_mutex);
}

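/*
 * Apply @queue_depth to @acl, falling back to a minimum depth of 1
 * (with a warning) when the fabric or the user supplies zero.
 */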
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
                            struct se_node_acl *acl, u32 queue_depth)
{
        acl->queue_depth = queue_depth;

        if (!acl->queue_depth) {
                pr_warn("Queue depth for %s Initiator Node: %s is 0, "
                        "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
                const unsigned char *initiatorname)
{
        struct se_node_acl *acl;
        u32 queue_depth;

        acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
                        GFP_KERNEL);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        INIT_HLIST_HEAD(&acl->lun_entry_hlist);
        kref_init(&acl->acl_kref);
        init_completion(&acl->acl_free_comp);
        spin_lock_init(&acl->nacl_sess_lock);
        mutex_init(&acl->lun_entry_mutex);
        atomic_set(&acl->acl_pr_ref_count, 0);

        if (tpg->se_tpg_tfo->tpg_get_default_depth)
                queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
        else
                queue_depth = 1;
        target_set_nacl_queue_depth(tpg, acl, queue_depth);

        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

        tpg->se_tpg_tfo->set_default_node_attributes(acl);

        return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        mutex_unlock(&tpg->acl_node_mutex);

        pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                acl->dynamic_node_acl ? "DYNAMIC" : "",
                acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(),
                acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
                             const char *initiatorname)
{
        struct se_node_acl *acl;
        bool found = false;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname)) {
                        found = true;
                        break;
                }
        }
        mutex_unlock(&tpg->acl_node_mutex);

        return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

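/*
 * Login-time ACL lookup used by fabric drivers: return an existing explicit
 * or dynamic se_node_acl with an extra acl_kref held, or generate a new
 * dynamic ACL when the fabric reports tpg_check_demo_mode() == 1.  The
 * reference taken here is released at session shutdown within
 * transport_deregister_session() / transport_free_session().
 *
 * Illustrative sketch of a fabric login path (hypothetical variable names,
 * not taken from any in-tree fabric driver):
 *
 *	acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!acl)
 *		return -EPERM;
 *	transport_register_session(se_tpg, acl, se_sess, fabric_sess_ptr);
 */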
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
                return NULL;

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return NULL;
        /*
         * When allocating a dynamically generated node_acl, go ahead
         * and take the extra kref now before returning to the fabric
         * driver caller.
         *
         * Note this reference will be released at session shutdown
         * time within transport_free_session() code.
         */
        kref_get(&acl->acl_kref);
        acl->dynamic_node_acl = 1;

        /*
         * Here we only create demo-mode MappedLUNs from the active
         * TPG LUNs if the fabric is not explicitly asking for
         * tpg_check_demo_mode_login_only() == 1.
         */
        if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
            (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
                core_tpg_add_node_to_devs(acl, tpg, NULL);

        target_add_node_acl(acl);
        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        mutex_lock(&tpg->acl_node_mutex);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
                        mutex_unlock(&tpg->acl_node_mutex);
                        return acl;
                }

                pr_err("ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
                        initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
                mutex_unlock(&tpg->acl_node_mutex);
                return ERR_PTR(-EEXIST);
        }
        mutex_unlock(&tpg->acl_node_mutex);

        acl = target_alloc_node_acl(tpg, initiatorname);
        if (!acl)
                return ERR_PTR(-ENOMEM);

        target_add_node_acl(acl);
        return acl;
}

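/*
 * Ask the fabric to close every session on @acl that is not already tearing
 * down.  The nacl_sess_lock is dropped around ->close_session(), so the list
 * scan restarts from the head after each callback.
 */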
static void target_shutdown_sessions(struct se_node_acl *acl)
{
        struct se_session *sess;
        unsigned long flags;

restart:
        spin_lock_irqsave(&acl->nacl_sess_lock, flags);
        list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
                if (sess->sess_tearing_down)
                        continue;

                list_del_init(&sess->sess_acl_list);
                spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

                if (acl->se_tpg->se_tpg_tfo->close_session)
                        acl->se_tpg->se_tpg_tfo->close_session(sess);
                goto restart;
        }
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
        struct se_portal_group *tpg = acl->se_tpg;

        mutex_lock(&tpg->acl_node_mutex);
        if (acl->dynamic_node_acl)
                acl->dynamic_node_acl = 0;
        list_del(&acl->acl_list);
        mutex_unlock(&tpg->acl_node_mutex);

        target_shutdown_sessions(acl);

        target_put_nacl(acl);
        /*
         * Wait for last target_put_nacl() to complete in target_complete_nacl()
         * for active fabric session transport_deregister_session() callbacks.
         */
        wait_for_completion(&acl->acl_free_comp);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_free_device_list_for_node(acl, tpg);

        pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
                tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

        kfree(acl);
}

/* core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_node_acl *acl,
        u32 queue_depth)
{
        struct se_portal_group *tpg = acl->se_tpg;

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * target_set_nacl_queue_depth() to set the new queue depth.
         */
        target_set_nacl_queue_depth(tpg, acl, queue_depth);

        /*
         * Shutdown all pending sessions to force session reinstatement.
         */
        target_shutdown_sessions(acl);

        pr_debug("Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
                acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        const char *new_tag)
{
        if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
                return -EINVAL;

        if (!strncmp("NULL", new_tag, 4)) {
                acl->acl_tag[0] = '\0';
                return 0;
        }

        return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
        struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

        complete(&lun->lun_ref_comp);
}

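/*
 * core_tpg_register() publishes a fabric TPG to target_core.  In the usual
 * configfs ->fabric_make_tpg() path, @se_wwn supplies the fabric's tf_ops;
 * special cases such as iscsi-target discovery TPGs pass a NULL @se_wwn and
 * must set se_tpg->se_tpg_tfo themselves beforehand.
 *
 * Hypothetical sketch of the usual configfs path (fabric-side names are made
 * up for illustration):
 *
 *	ret = core_tpg_register(wwn, &my_tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */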
int core_tpg_register(
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        int proto_id)
{
        int ret;

        if (!se_tpg)
                return -EINVAL;
        /*
         * For the typical case where core_tpg_register() is called by a
         * fabric driver from target_core_fabric_ops->fabric_make_tpg()
         * configfs context, use the original tf_ops pointer already saved
         * by target-core in target_fabric_make_wwn().
         *
         * Otherwise, for special cases like iscsi-target discovery TPGs
         * the caller is responsible for setting ->se_tpg_tfo ahead of
         * calling core_tpg_register().
         */
        if (se_wwn)
                se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

        if (!se_tpg->se_tpg_tfo) {
                pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
                return -EINVAL;
        }

        INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
        se_tpg->proto_id = proto_id;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_node);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->session_lock);
        mutex_init(&se_tpg->tpg_lun_mutex);
        mutex_init(&se_tpg->acl_node_mutex);

        if (se_tpg->proto_id >= 0) {
                se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
                if (IS_ERR(se_tpg->tpg_virt_lun0))
                        return PTR_ERR(se_tpg->tpg_virt_lun0);

                ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
                                       true, g_lun0_dev);
                if (ret < 0) {
                        kfree(se_tpg->tpg_virt_lun0);
                        return ret;
                }
        }

        spin_lock_bh(&tpg_lock);
        list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
        spin_unlock_bh(&tpg_lock);

        pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
                "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
                se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
        struct se_node_acl *nacl, *nacl_tmp;
        LIST_HEAD(node_list);

        pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
                "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
                tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
                se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

        spin_lock_bh(&tpg_lock);
        list_del(&se_tpg->se_tpg_node);
        spin_unlock_bh(&tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        mutex_lock(&se_tpg->acl_node_mutex);
        list_splice_init(&se_tpg->acl_node_list, &node_list);
        mutex_unlock(&se_tpg->acl_node_mutex);
        /*
         * Release any remaining demo-mode generated se_node_acl that have
         * not been released because of TFO->tpg_check_demo_mode_cache() == 1
         * in transport_deregister_session().
         */
        list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
                list_del(&nacl->acl_list);

                core_tpg_wait_for_nacl_pr_ref(nacl);
                core_free_device_list_for_node(nacl, se_tpg);
                kfree(nacl);
        }

        if (se_tpg->proto_id >= 0) {
                core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
                kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
        }

        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

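/*
 * Allocate and initialize an se_lun for @tpg.  The LUN is not visible until
 * core_tpg_add_lun() links it to a backend device; if that step fails the
 * caller simply kfree()s the LUN, as core_tpg_register() does for
 * tpg_virt_lun0.
 */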
struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u64 unpacked_lun)
{
        struct se_lun *lun;

        lun = kzalloc(sizeof(*lun), GFP_KERNEL);
        if (!lun) {
                pr_err("Unable to allocate se_lun memory\n");
                return ERR_PTR(-ENOMEM);
        }
        lun->unpacked_lun = unpacked_lun;
        lun->lun_link_magic = SE_LUN_LINK_MAGIC;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_ref_comp);
        INIT_LIST_HEAD(&lun->lun_deve_list);
        INIT_LIST_HEAD(&lun->lun_dev_link);
        atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
        spin_lock_init(&lun->lun_deve_lock);
        mutex_init(&lun->lun_tg_pt_md_mutex);
        INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
        spin_lock_init(&lun->lun_tg_pt_gp_lock);
        lun->lun_tpg = tpg;

        return lun;
}

int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        bool lun_access_ro,
        struct se_device *dev)
{
        int ret;

        ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                              GFP_KERNEL);
        if (ret < 0)
                goto out;

        ret = core_alloc_rtpi(lun, dev);
        if (ret)
                goto out_kill_ref;

        if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

        mutex_lock(&tpg->tpg_lun_mutex);

        spin_lock(&dev->se_port_lock);
        lun->lun_index = dev->dev_index;
        rcu_assign_pointer(lun->lun_se_dev, dev);
        dev->export_count++;
        list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (dev->dev_flags & DF_READ_ONLY)
                lun->lun_access_ro = true;
        else
                lun->lun_access_ro = lun_access_ro;
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
        mutex_unlock(&tpg->tpg_lun_mutex);

        return 0;

out_kill_ref:
        percpu_ref_exit(&lun->lun_ref);
out:
        return ret;
}

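/*
 * Undo core_tpg_add_lun(): drain se_lun->lun_ref, detach the ALUA target
 * port group, and unlink the LUN from both the backend device and the TPG.
 */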
void core_tpg_remove_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any active I/O references to percpu se_lun->lun_ref to
         * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
         * logic when referencing a remote target port during ALL_TGT_PT=1
         * and generating UNIT_ATTENTIONs for ALUA access state transition.
         */
        transport_clear_lun_ref(lun);

        mutex_lock(&tpg->tpg_lun_mutex);
        if (lun->lun_se_dev) {
                target_detach_tg_pt_gp(lun);

                spin_lock(&dev->se_port_lock);
                list_del(&lun->lun_dev_link);
                dev->export_count--;
                rcu_assign_pointer(lun->lun_se_dev, NULL);
                spin_unlock(&dev->se_port_lock);
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
        mutex_unlock(&tpg->tpg_lun_mutex);

        percpu_ref_exit(&lun->lun_ref);
}