/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		deve->deve_cmds++;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
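/*
 * Usage sketch (illustrative commentary, not code from this driver): a
 * fabric module that resolves the LUN by hand, rather than through the
 * normal target_submit_cmd() path, would call the lookup roughly like
 * this.  "demo_fabric_dispatch" and its argument names are hypothetical:
 *
 *	static int demo_fabric_dispatch(struct se_cmd *se_cmd, u32 unpacked_lun)
 *	{
 *		int ret;
 *
 *		// Sets se_cmd->se_lun/se_dev and bumps per-deve statistics;
 *		// returns -ENODEV for an unmapped LUN, or -EACCES on a
 *		// write to a read-only mapping.
 *		ret = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *		if (ret < 0)
 *			return ret;	// sense reason already set in se_cmd
 *		return 0;
 *	}
 */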
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries se_port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
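/*
 * Illustrative caller pattern (a sketch, not code from this driver): a
 * deve returned by core_get_se_deve_from_rtpi() is pinned only by
 * pr_ref_count, so PR code must drop the reference once it is done with
 * the entry; otherwise core_disable_device_list_for_node() below will
 * spin forever waiting for the count to reach zero:
 *
 *	deve = core_get_se_deve_from_rtpi(nacl, rtpi);
 *	if (deve) {
 *		... use deve while the reference is held ...
 *		atomic_dec(&deve->pr_ref_count);
 *		smp_mb__after_atomic_dec();
 *	}
 */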
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/* core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode.
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
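/*
 * Worked example of the demo mode -> explicit transition above (added
 * commentary; see core_tpg_add_node_to_devs() and
 * core_dev_add_initiator_node_lun_acl() below for the real call sites):
 *
 *   1) An initiator logs in while tpg_check_demo_mode() is true, so a
 *      dynamic MappedLUN is created with lun_acl == NULL and
 *      TRANSPORT_LUNFLAGS_INITIATOR_ACCESS set.
 *   2) The admin later creates an explicit ACL for the same se_lun +
 *      mapped_lun; this function then only attaches the new lun_acl and
 *      refreshes the RW/RO flags, keeping the existing deve intact.
 *
 * A second explicit ACL for the same mapped_lun, or one naming a
 * different se_lun, is rejected with -EINVAL by the checks above.
 */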
/* core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list.  This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
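/*
 * Note on the locking pattern above (added commentary): both
 * core_free_device_list_for_node() and core_clear_lun_from_tpg() must
 * drop their list spinlocks around the call to
 * core_disable_device_list_for_node(), because that function busy-waits
 * on deve->pr_ref_count and takes nacl->device_list_lock itself; calling
 * it with the lock still held would self-deadlock.
 */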
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code           Description
	 * 0h             Reserved
	 * 1h             Relative port 1, historically known as port A
	 * 2h             Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
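/*
 * Worked example for se_dev_align_max_sectors() (added commentary): with
 * PAGE_SIZE == 4096 and block_size == 512, alignment is 8 sectors, so a
 * requested max_sectors of 2049 is rounded down to 2048 and the change
 * is logged.  With block_size == 4096 the alignment clamps to 1 (via the
 * max() against 1ul) and every value passes through unchanged.
 */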
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("emulate_dpo not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("emulate_fua_read not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
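/*
 * Usage note (added commentary, not code from this file): the se_dev_set_*
 * attribute setters in this file are wired up to per-device configfs
 * attributes (see target_core_configfs.c), so from userspace a typical
 * toggle looks like the following, with the HBA/device names being
 * hypothetical:
 *
 *	echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/attrib/emulate_write_cache
 *
 * Each setter validates the value and either updates dev->dev_attrib and
 * logs the change, or rejects the write with a negative errno.
 */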
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
		dev->dev_attrib.block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
		dev, fabric_max_sectors);
	return 0;
}
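/*
 * Worked example for se_dev_set_fabric_max_sectors() (added commentary;
 * assumes the usual definitions DA_STATUS_MAX_SECTORS_MIN == 128 and
 * DA_STATUS_MAX_SECTORS_MAX == 8192 from target_core_base.h): on a
 * non-pSCSI device with a 512-byte block_size, a request of 64 fails the
 * MIN check, 16384 fails the MAX check, and 4097 is accepted but rounded
 * down to 4096 by the PAGE_SIZE alignment step above.
 */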
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use Linux/SCSI tools to change the"
			" block_size of the underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	int rc;

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	rc = core_tpg_post_addlun(tpg, lun_p,
		TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
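/*
 * Added commentary: core_get_lun_from_tpg() and core_dev_get_lun() differ
 * only in the lun_status they require.  The former demands
 * TRANSPORT_LUN_STATUS_FREE and is used when claiming a LUN slot that has
 * not yet been activated; the latter demands TRANSPORT_LUN_STATUS_ACTIVE
 * and is used below when mapping an initiator ACL onto a LUN that must
 * already be exported.
 */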
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
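/*
 * Sequence sketch (illustrative; in-tree this is driven from
 * target_core_fabric_configfs.c): creating an explicit MappedLUN ACL is
 * a two-step operation using the helpers above.  Error handling is
 * abbreviated and the names are placeholders:
 *
 *	int ret;
 *	struct se_lun_acl *lacl;
 *
 *	lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
 *			"iqn.1994-05.com.example:initiator", &ret);
 *	if (!lacl)
 *		return ret;
 *	ret = core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
 *			TRANSPORT_LUNFLAGS_READ_WRITE);
 *	if (ret < 0)
 *		core_dev_free_initiator_node_lun_acl(tpg, lacl);
 */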
/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
	pr_debug("  ANSI SCSI revision: %02x\n",
		dev->transport->get_device_rev(dev));
}
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->transport;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->stats_lock);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

	dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	return dev;
}
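/*
 * Lifecycle sketch (illustrative commentary; the in-tree driver for this
 * API is configfs): a backend device passes through alloc -> parameter
 * setup -> configure -> free, mirroring core_dev_setup_virtual_lun0() at
 * the end of this file.  The device name and backend parameter string
 * here are hypothetical:
 *
 *	struct se_device *dev;
 *
 *	dev = target_alloc_device(hba, "mydev");
 *	if (!dev)
 *		return -ENOMEM;
 *	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
 *	if (target_configure_device(dev) < 0)
 *		target_free_device(dev);
 */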
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
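/*
 * Added note: core_dev_setup_virtual_lun0() and
 * core_dev_release_virtual_lun0() are intended to be invoked once from
 * the target core's module init/exit paths (in-tree, from
 * target_core_configfs.c).  The rd_mcp backed "virt_lun0" device created
 * here is what backs se_portal_group->tpg_virt_lun0, which
 * transport_lookup_cmd_lun() above falls back to so that REPORT LUNS and
 * friends still work for initiators with no active MappedLUN 0.
 */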