1 /******************************************************************************* 2 * Filename: target_core_tpg.c 3 * 4 * This file contains generic Target Portal Group related functions. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/string.h> 28 #include <linux/timer.h> 29 #include <linux/slab.h> 30 #include <linux/spinlock.h> 31 #include <linux/in.h> 32 #include <linux/export.h> 33 #include <net/sock.h> 34 #include <net/tcp.h> 35 #include <scsi/scsi_proto.h> 36 37 #include <target/target_core_base.h> 38 #include <target/target_core_backend.h> 39 #include <target/target_core_fabric.h> 40 41 #include "target_core_internal.h" 42 #include "target_core_alua.h" 43 #include "target_core_pr.h" 44 #include "target_core_ua.h" 45 46 extern struct se_device *g_lun0_dev; 47 48 static DEFINE_SPINLOCK(tpg_lock); 49 static LIST_HEAD(tpg_list); 50 51 /* __core_tpg_get_initiator_node_acl(): 52 * 53 * mutex_lock(&tpg->acl_node_mutex); must be held when calling 54 */ 55 struct se_node_acl *__core_tpg_get_initiator_node_acl( 56 struct se_portal_group *tpg, 57 const char *initiatorname) 58 { 59 struct se_node_acl *acl; 60 61 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 62 if (!strcmp(acl->initiatorname, initiatorname)) 63 return acl; 64 } 65 66 return NULL; 67 } 68 69 /* core_tpg_get_initiator_node_acl(): 70 * 71 * 72 */ 73 struct se_node_acl *core_tpg_get_initiator_node_acl( 74 struct se_portal_group *tpg, 75 unsigned char *initiatorname) 76 { 77 struct se_node_acl *acl; 78 79 mutex_lock(&tpg->acl_node_mutex); 80 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 81 mutex_unlock(&tpg->acl_node_mutex); 82 83 return acl; 84 } 85 EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); 86 87 void core_allocate_nexus_loss_ua( 88 struct se_node_acl *nacl) 89 { 90 struct se_dev_entry *deve; 91 92 if (!nacl) 93 return; 94 95 rcu_read_lock(); 96 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) 97 core_scsi3_ua_allocate(deve, 0x29, 98 ASCQ_29H_NEXUS_LOSS_OCCURRED); 99 rcu_read_unlock(); 100 } 101 EXPORT_SYMBOL(core_allocate_nexus_loss_ua); 102 103 /* core_tpg_add_node_to_devs(): 104 * 105 * 106 */ 107 void core_tpg_add_node_to_devs( 108 struct se_node_acl *acl, 109 struct se_portal_group *tpg, 110 struct se_lun *lun_orig) 111 { 112 u32 lun_access = 0; 113 struct se_lun *lun; 114 struct se_device *dev; 115 116 mutex_lock(&tpg->tpg_lun_mutex); 117 hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) { 118 if (lun_orig && lun != lun_orig) 119 continue; 120 121 dev = rcu_dereference_check(lun->lun_se_dev, 122 lockdep_is_held(&tpg->tpg_lun_mutex)); 123 /* 124 * By default in LIO-Target $FABRIC_MOD, 125 * demo_mode_write_protect is ON, or READ_ONLY; 126 */ 127 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { 128 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 129 } else { 130 /* 131 * Allow only optical drives to issue R/W in default RO 132 * demo mode. 133 */ 134 if (dev->transport->get_device_type(dev) == TYPE_DISK) 135 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 136 else 137 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 138 } 139 140 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" 141 " access for LUN in Demo Mode\n", 142 tpg->se_tpg_tfo->get_fabric_name(), 143 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 144 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 145 "READ-WRITE" : "READ-ONLY"); 146 147 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, 148 lun_access, acl, tpg); 149 /* 150 * Check to see if there are any existing persistent reservation 151 * APTPL pre-registrations that need to be enabled for this dynamic 152 * LUN ACL now.. 153 */ 154 core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, 155 lun->unpacked_lun); 156 } 157 mutex_unlock(&tpg->tpg_lun_mutex); 158 } 159 160 /* core_set_queue_depth_for_node(): 161 * 162 * 163 */ 164 static int core_set_queue_depth_for_node( 165 struct se_portal_group *tpg, 166 struct se_node_acl *acl) 167 { 168 if (!acl->queue_depth) { 169 pr_err("Queue depth for %s Initiator Node: %s is 0," 170 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 171 acl->initiatorname); 172 acl->queue_depth = 1; 173 } 174 175 return 0; 176 } 177 178 static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg, 179 const unsigned char *initiatorname) 180 { 181 struct se_node_acl *acl; 182 183 acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), 184 GFP_KERNEL); 185 if (!acl) 186 return NULL; 187 188 INIT_LIST_HEAD(&acl->acl_list); 189 INIT_LIST_HEAD(&acl->acl_sess_list); 190 INIT_HLIST_HEAD(&acl->lun_entry_hlist); 191 kref_init(&acl->acl_kref); 192 init_completion(&acl->acl_free_comp); 193 spin_lock_init(&acl->nacl_sess_lock); 194 mutex_init(&acl->lun_entry_mutex); 195 atomic_set(&acl->acl_pr_ref_count, 0); 196 if (tpg->se_tpg_tfo->tpg_get_default_depth) 197 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); 198 else 199 acl->queue_depth = 1; 200 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 201 acl->se_tpg = tpg; 202 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 203 204 tpg->se_tpg_tfo->set_default_node_attributes(acl); 205 206 if (core_set_queue_depth_for_node(tpg, acl) < 0) 207 goto out_free_acl; 208 209 return acl; 210 211 out_free_acl: 212 kfree(acl); 213 return NULL; 214 } 215 216 static void target_add_node_acl(struct se_node_acl *acl) 217 { 218 struct se_portal_group *tpg = acl->se_tpg; 219 220 mutex_lock(&tpg->acl_node_mutex); 221 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 222 mutex_unlock(&tpg->acl_node_mutex); 223 224 pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s" 225 " Initiator Node: %s\n", 226 tpg->se_tpg_tfo->get_fabric_name(), 227 tpg->se_tpg_tfo->tpg_get_tag(tpg), 228 acl->dynamic_node_acl ? "DYNAMIC" : "", 229 acl->queue_depth, 230 tpg->se_tpg_tfo->get_fabric_name(), 231 acl->initiatorname); 232 } 233 234 struct se_node_acl *core_tpg_check_initiator_node_acl( 235 struct se_portal_group *tpg, 236 unsigned char *initiatorname) 237 { 238 struct se_node_acl *acl; 239 240 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 241 if (acl) 242 return acl; 243 244 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) 245 return NULL; 246 247 acl = target_alloc_node_acl(tpg, initiatorname); 248 if (!acl) 249 return NULL; 250 acl->dynamic_node_acl = 1; 251 252 /* 253 * Here we only create demo-mode MappedLUNs from the active 254 * TPG LUNs if the fabric is not explicitly asking for 255 * tpg_check_demo_mode_login_only() == 1. 256 */ 257 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) || 258 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1)) 259 core_tpg_add_node_to_devs(acl, tpg, NULL); 260 261 target_add_node_acl(acl); 262 return acl; 263 } 264 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); 265 266 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) 267 { 268 while (atomic_read(&nacl->acl_pr_ref_count) != 0) 269 cpu_relax(); 270 } 271 272 struct se_node_acl *core_tpg_add_initiator_node_acl( 273 struct se_portal_group *tpg, 274 const char *initiatorname) 275 { 276 struct se_node_acl *acl; 277 278 mutex_lock(&tpg->acl_node_mutex); 279 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 280 if (acl) { 281 if (acl->dynamic_node_acl) { 282 acl->dynamic_node_acl = 0; 283 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 284 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 285 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 286 mutex_unlock(&tpg->acl_node_mutex); 287 return acl; 288 } 289 290 pr_err("ACL entry for %s Initiator" 291 " Node %s already exists for TPG %u, ignoring" 292 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 293 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 294 mutex_unlock(&tpg->acl_node_mutex); 295 return ERR_PTR(-EEXIST); 296 } 297 mutex_unlock(&tpg->acl_node_mutex); 298 299 acl = target_alloc_node_acl(tpg, initiatorname); 300 if (!acl) 301 return ERR_PTR(-ENOMEM); 302 303 target_add_node_acl(acl); 304 return acl; 305 } 306 307 void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) 308 { 309 struct se_portal_group *tpg = acl->se_tpg; 310 LIST_HEAD(sess_list); 311 struct se_session *sess, *sess_tmp; 312 unsigned long flags; 313 int rc; 314 315 mutex_lock(&tpg->acl_node_mutex); 316 if (acl->dynamic_node_acl) { 317 acl->dynamic_node_acl = 0; 318 } 319 list_del(&acl->acl_list); 320 mutex_unlock(&tpg->acl_node_mutex); 321 322 spin_lock_irqsave(&acl->nacl_sess_lock, flags); 323 acl->acl_stop = 1; 324 325 list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list, 326 sess_acl_list) { 327 if (sess->sess_tearing_down != 0) 328 continue; 329 330 target_get_session(sess); 331 list_move(&sess->sess_acl_list, &sess_list); 332 } 333 spin_unlock_irqrestore(&acl->nacl_sess_lock, flags); 334 335 list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) { 336 list_del(&sess->sess_acl_list); 337 338 rc = tpg->se_tpg_tfo->shutdown_session(sess); 339 target_put_session(sess); 340 if (!rc) 341 continue; 342 target_put_session(sess); 343 } 344 target_put_nacl(acl); 345 /* 346 * Wait for last target_put_nacl() to complete in target_complete_nacl() 347 * for active fabric session transport_deregister_session() callbacks. 348 */ 349 wait_for_completion(&acl->acl_free_comp); 350 351 core_tpg_wait_for_nacl_pr_ref(acl); 352 core_free_device_list_for_node(acl, tpg); 353 354 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 355 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 356 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 357 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); 358 359 kfree(acl); 360 } 361 362 /* core_tpg_set_initiator_node_queue_depth(): 363 * 364 * 365 */ 366 int core_tpg_set_initiator_node_queue_depth( 367 struct se_portal_group *tpg, 368 unsigned char *initiatorname, 369 u32 queue_depth, 370 int force) 371 { 372 struct se_session *sess, *init_sess = NULL; 373 struct se_node_acl *acl; 374 unsigned long flags; 375 int dynamic_acl = 0; 376 377 mutex_lock(&tpg->acl_node_mutex); 378 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 379 if (!acl) { 380 pr_err("Access Control List entry for %s Initiator" 381 " Node %s does not exists for TPG %hu, ignoring" 382 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 383 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 384 mutex_unlock(&tpg->acl_node_mutex); 385 return -ENODEV; 386 } 387 if (acl->dynamic_node_acl) { 388 acl->dynamic_node_acl = 0; 389 dynamic_acl = 1; 390 } 391 mutex_unlock(&tpg->acl_node_mutex); 392 393 spin_lock_irqsave(&tpg->session_lock, flags); 394 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 395 if (sess->se_node_acl != acl) 396 continue; 397 398 if (!force) { 399 pr_err("Unable to change queue depth for %s" 400 " Initiator Node: %s while session is" 401 " operational. To forcefully change the queue" 402 " depth and force session reinstatement" 403 " use the \"force=1\" parameter.\n", 404 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 405 spin_unlock_irqrestore(&tpg->session_lock, flags); 406 407 mutex_lock(&tpg->acl_node_mutex); 408 if (dynamic_acl) 409 acl->dynamic_node_acl = 1; 410 mutex_unlock(&tpg->acl_node_mutex); 411 return -EEXIST; 412 } 413 /* 414 * Determine if the session needs to be closed by our context. 415 */ 416 if (!tpg->se_tpg_tfo->shutdown_session(sess)) 417 continue; 418 419 init_sess = sess; 420 break; 421 } 422 423 /* 424 * User has requested to change the queue depth for a Initiator Node. 425 * Change the value in the Node's struct se_node_acl, and call 426 * core_set_queue_depth_for_node() to add the requested queue depth. 427 * 428 * Finally call tpg->se_tpg_tfo->close_session() to force session 429 * reinstatement to occur if there is an active session for the 430 * $FABRIC_MOD Initiator Node in question. 431 */ 432 acl->queue_depth = queue_depth; 433 434 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 435 spin_unlock_irqrestore(&tpg->session_lock, flags); 436 /* 437 * Force session reinstatement if 438 * core_set_queue_depth_for_node() failed, because we assume 439 * the $FABRIC_MOD has already the set session reinstatement 440 * bit from tpg->se_tpg_tfo->shutdown_session() called above. 441 */ 442 if (init_sess) 443 tpg->se_tpg_tfo->close_session(init_sess); 444 445 mutex_lock(&tpg->acl_node_mutex); 446 if (dynamic_acl) 447 acl->dynamic_node_acl = 1; 448 mutex_unlock(&tpg->acl_node_mutex); 449 return -EINVAL; 450 } 451 spin_unlock_irqrestore(&tpg->session_lock, flags); 452 /* 453 * If the $FABRIC_MOD session for the Initiator Node ACL exists, 454 * forcefully shutdown the $FABRIC_MOD session/nexus. 455 */ 456 if (init_sess) 457 tpg->se_tpg_tfo->close_session(init_sess); 458 459 pr_debug("Successfully changed queue depth to: %d for Initiator" 460 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 461 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 462 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 463 464 mutex_lock(&tpg->acl_node_mutex); 465 if (dynamic_acl) 466 acl->dynamic_node_acl = 1; 467 mutex_unlock(&tpg->acl_node_mutex); 468 469 return 0; 470 } 471 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); 472 473 /* core_tpg_set_initiator_node_tag(): 474 * 475 * Initiator nodeacl tags are not used internally, but may be used by 476 * userspace to emulate aliases or groups. 477 * Returns length of newly-set tag or -EINVAL. 478 */ 479 int core_tpg_set_initiator_node_tag( 480 struct se_portal_group *tpg, 481 struct se_node_acl *acl, 482 const char *new_tag) 483 { 484 if (strlen(new_tag) >= MAX_ACL_TAG_SIZE) 485 return -EINVAL; 486 487 if (!strncmp("NULL", new_tag, 4)) { 488 acl->acl_tag[0] = '\0'; 489 return 0; 490 } 491 492 return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag); 493 } 494 EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); 495 496 static void core_tpg_lun_ref_release(struct percpu_ref *ref) 497 { 498 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); 499 500 complete(&lun->lun_ref_comp); 501 } 502 503 int core_tpg_register( 504 struct se_wwn *se_wwn, 505 struct se_portal_group *se_tpg, 506 int proto_id) 507 { 508 int ret; 509 510 if (!se_tpg) 511 return -EINVAL; 512 /* 513 * For the typical case where core_tpg_register() is called by a 514 * fabric driver from target_core_fabric_ops->fabric_make_tpg() 515 * configfs context, use the original tf_ops pointer already saved 516 * by target-core in target_fabric_make_wwn(). 517 * 518 * Otherwise, for special cases like iscsi-target discovery TPGs 519 * the caller is responsible for setting ->se_tpg_tfo ahead of 520 * calling core_tpg_register(). 521 */ 522 if (se_wwn) 523 se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops; 524 525 if (!se_tpg->se_tpg_tfo) { 526 pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n"); 527 return -EINVAL; 528 } 529 530 INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist); 531 se_tpg->proto_id = proto_id; 532 se_tpg->se_tpg_wwn = se_wwn; 533 atomic_set(&se_tpg->tpg_pr_ref_count, 0); 534 INIT_LIST_HEAD(&se_tpg->acl_node_list); 535 INIT_LIST_HEAD(&se_tpg->se_tpg_node); 536 INIT_LIST_HEAD(&se_tpg->tpg_sess_list); 537 spin_lock_init(&se_tpg->session_lock); 538 mutex_init(&se_tpg->tpg_lun_mutex); 539 mutex_init(&se_tpg->acl_node_mutex); 540 541 if (se_tpg->proto_id >= 0) { 542 se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0); 543 if (IS_ERR(se_tpg->tpg_virt_lun0)) 544 return PTR_ERR(se_tpg->tpg_virt_lun0); 545 546 ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, 547 TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev); 548 if (ret < 0) { 549 kfree(se_tpg->tpg_virt_lun0); 550 return ret; 551 } 552 } 553 554 spin_lock_bh(&tpg_lock); 555 list_add_tail(&se_tpg->se_tpg_node, &tpg_list); 556 spin_unlock_bh(&tpg_lock); 557 558 pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, " 559 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(), 560 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ? 561 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL, 562 se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 563 564 return 0; 565 } 566 EXPORT_SYMBOL(core_tpg_register); 567 568 int core_tpg_deregister(struct se_portal_group *se_tpg) 569 { 570 const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; 571 struct se_node_acl *nacl, *nacl_tmp; 572 LIST_HEAD(node_list); 573 574 pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, " 575 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(), 576 tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL, 577 se_tpg->proto_id, tfo->tpg_get_tag(se_tpg)); 578 579 spin_lock_bh(&tpg_lock); 580 list_del(&se_tpg->se_tpg_node); 581 spin_unlock_bh(&tpg_lock); 582 583 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) 584 cpu_relax(); 585 586 mutex_lock(&se_tpg->acl_node_mutex); 587 list_splice_init(&se_tpg->acl_node_list, &node_list); 588 mutex_unlock(&se_tpg->acl_node_mutex); 589 /* 590 * Release any remaining demo-mode generated se_node_acl that have 591 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 592 * in transport_deregister_session(). 593 */ 594 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { 595 list_del(&nacl->acl_list); 596 597 core_tpg_wait_for_nacl_pr_ref(nacl); 598 core_free_device_list_for_node(nacl, se_tpg); 599 kfree(nacl); 600 } 601 602 if (se_tpg->proto_id >= 0) { 603 core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); 604 kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head); 605 } 606 607 return 0; 608 } 609 EXPORT_SYMBOL(core_tpg_deregister); 610 611 struct se_lun *core_tpg_alloc_lun( 612 struct se_portal_group *tpg, 613 u64 unpacked_lun) 614 { 615 struct se_lun *lun; 616 617 lun = kzalloc(sizeof(*lun), GFP_KERNEL); 618 if (!lun) { 619 pr_err("Unable to allocate se_lun memory\n"); 620 return ERR_PTR(-ENOMEM); 621 } 622 lun->unpacked_lun = unpacked_lun; 623 lun->lun_link_magic = SE_LUN_LINK_MAGIC; 624 atomic_set(&lun->lun_acl_count, 0); 625 init_completion(&lun->lun_ref_comp); 626 INIT_LIST_HEAD(&lun->lun_deve_list); 627 INIT_LIST_HEAD(&lun->lun_dev_link); 628 atomic_set(&lun->lun_tg_pt_secondary_offline, 0); 629 spin_lock_init(&lun->lun_deve_lock); 630 mutex_init(&lun->lun_tg_pt_md_mutex); 631 INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link); 632 spin_lock_init(&lun->lun_tg_pt_gp_lock); 633 lun->lun_tpg = tpg; 634 635 return lun; 636 } 637 638 int core_tpg_add_lun( 639 struct se_portal_group *tpg, 640 struct se_lun *lun, 641 u32 lun_access, 642 struct se_device *dev) 643 { 644 int ret; 645 646 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0, 647 GFP_KERNEL); 648 if (ret < 0) 649 goto out; 650 651 ret = core_alloc_rtpi(lun, dev); 652 if (ret) 653 goto out_kill_ref; 654 655 if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 656 !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 657 target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); 658 659 mutex_lock(&tpg->tpg_lun_mutex); 660 661 spin_lock(&dev->se_port_lock); 662 lun->lun_index = dev->dev_index; 663 rcu_assign_pointer(lun->lun_se_dev, dev); 664 dev->export_count++; 665 list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); 666 spin_unlock(&dev->se_port_lock); 667 668 if (dev->dev_flags & DF_READ_ONLY) 669 lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 670 else 671 lun->lun_access = lun_access; 672 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 673 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); 674 mutex_unlock(&tpg->tpg_lun_mutex); 675 676 return 0; 677 678 out_kill_ref: 679 percpu_ref_exit(&lun->lun_ref); 680 out: 681 return ret; 682 } 683 684 void core_tpg_remove_lun( 685 struct se_portal_group *tpg, 686 struct se_lun *lun) 687 { 688 /* 689 * rcu_dereference_raw protected by se_lun->lun_group symlink 690 * reference to se_device->dev_group. 691 */ 692 struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 693 694 core_clear_lun_from_tpg(lun, tpg); 695 /* 696 * Wait for any active I/O references to percpu se_lun->lun_ref to 697 * be released. Also, se_lun->lun_ref is now used by PR and ALUA 698 * logic when referencing a remote target port during ALL_TGT_PT=1 699 * and generating UNIT_ATTENTIONs for ALUA access state transition. 700 */ 701 transport_clear_lun_ref(lun); 702 703 mutex_lock(&tpg->tpg_lun_mutex); 704 if (lun->lun_se_dev) { 705 target_detach_tg_pt_gp(lun); 706 707 spin_lock(&dev->se_port_lock); 708 list_del(&lun->lun_dev_link); 709 dev->export_count--; 710 rcu_assign_pointer(lun->lun_se_dev, NULL); 711 spin_unlock(&dev->se_port_lock); 712 } 713 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 714 hlist_del_rcu(&lun->link); 715 mutex_unlock(&tpg->tpg_lun_mutex); 716 717 percpu_ref_exit(&lun->lun_ref); 718 } 719