/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"
#include "target_core_stat.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
 *	Clear a node ACL's active struct se_dev_entry mappings and release
 *	the matching struct se_lun_acl from each LUN's ACL list.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
			    (acl->mapped_lun == deve->mapped_lun))
				break;
		}

		if (!acl) {
			pr_err("Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&acl->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(acl);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* __core_tpg_get_initiator_node_acl():
 *
 *	tpg->acl_node_lock must be held when calling.
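 *
 *	A minimal caller sketch (illustrative only; the callers in this file
 *	take the lock with spin_lock_irq()):
 *
 *		spin_lock_irq(&tpg->acl_node_lock);
 *		acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 *		if (acl) {
 *			... use acl while the lock is still held ...
 *		}
 *		spin_unlock_irq(&tpg->acl_node_lock);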
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 *	Locked lookup of an explicitly configured (non-dynamic) node ACL
 *	by initiator name.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname) &&
		    !acl->dynamic_node_acl) {
			spin_unlock_irq(&tpg->acl_node_lock);
			return acl;
		}
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	return NULL;
}

/* core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUN access for a node ACL across every
 *	active LUN in the TPG.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. LUNs are READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
					"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/* core_set_queue_depth_for_node():
 *
 *	Ensure the node ACL advertises a sane (non-zero) queue depth.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

/* core_create_device_list_for_node():
 *
 *	Allocate and initialize nacl->device_list with
 *	TRANSPORT_MAX_LUNS_PER_TPG struct se_dev_entry entries.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

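/*
 * core_tpg_check_initiator_node_acl() below is what a fabric module calls
 * from its session login path to locate or create the node ACL for an
 * initiator. A rough, hedged caller sketch (se_sess and the error label are
 * illustrative only, not part of this file):
 *
 *	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
 *			(unsigned char *)initiatorname);
 *	if (!se_sess->se_node_acl)
 *		goto out_free_sess;
 */
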
/* core_tpg_check_initiator_node_acl()
 *
 *	Return an existing explicit node ACL, or create a dynamic demo-mode
 *	ACL when the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i, ret;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

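/*
 * core_tpg_add_initiator_node_acl() below is expected to be called from a
 * fabric module's ->fabric_make_nodeacl() configfs handler, with se_nacl
 * embedded in a fabric-private container. A rough, hedged sketch (the
 * fabric_nacl container and cmdsn_depth value are illustrative only):
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(se_tpg,
 *			&fabric_nacl->se_node_acl, name, cmdsn_depth);
 *	if (IS_ERR(se_nacl)) {
 *		kfree(fabric_nacl);
 *		return se_nacl;
 *	}
 */
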
/* core_tpg_add_initiator_node_acl():
 *
 *	Register an explicitly configured node ACL, replacing any existing
 *	dynamic (demo-mode) ACL of the same initiator name.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/* core_tpg_del_initiator_node_acl():
 *
 *	Remove a node ACL from the TPG, shutting down any active sessions
 *	that reference it.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		tpg->se_tpg_tfo->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

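/*
 * core_tpg_set_initiator_node_queue_depth() below is normally driven from a
 * fabric module's configfs store handler for a per-ACL queue depth attribute.
 * A rough, hedged sketch of such a caller (new_depth and count are
 * illustrative only):
 *
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			acl->initiatorname, new_depth, 1);
 *	if (ret < 0)
 *		return ret;
 *	return count;
 */
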
/* core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the queue depth of an existing node ACL. With force=1, an
 *	active session for the initiator is shut down to force session
 *	reinstatement.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

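/*
 * core_tpg_register() below is called by a fabric module when it creates a
 * new TPG, typically from its configfs ->fabric_make_tpg() handler. A rough,
 * hedged sketch (the fabric_tpg container and fabric_ops are illustrative
 * only):
 *
 *	ret = core_tpg_register(&fabric_ops, wwn, &fabric_tpg->se_tpg,
 *			fabric_tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */
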
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			kfree(se_tpg->tpg_lun_list);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

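/*
 * core_tpg_pre_addlun() and core_tpg_post_addlun() below are intended to be
 * used as a pair by the LUN creation path. A rough, hedged sketch of the
 * expected sequence (error handling and variable names are illustrative
 * only):
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *
 *	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
 *		return ERR_PTR(-EINVAL);
 */
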
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0)
		return ret;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}