/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_device *dev;
        unsigned long flags;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
                return TCM_NON_EXISTENT_LUN;

        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
        if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                struct se_dev_entry *deve = se_cmd->se_deve;

                deve->total_cmds++;

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
                        return TCM_WRITE_PROTECTED;
                }

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        deve->write_bytes += se_cmd->data_length;
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        deve->read_bytes += se_cmd->data_length;

                se_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return TCM_NON_EXISTENT_LUN;
                }
                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE))
                        return TCM_WRITE_PROTECTED;

                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }

        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;

        /* TODO: get rid of this and use atomics for stats */
        dev = se_lun->lun_se_dev;
        spin_lock_irqsave(&dev->stats_lock, flags);
        dev->num_cmds++;
        if (se_cmd->data_direction == DMA_TO_DEVICE)
                dev->write_bytes += se_cmd->data_length;
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                dev->read_bytes += se_cmd->data_length;
        spin_unlock_irqrestore(&dev->stats_lock, flags);

        spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
        list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
        spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
                return -ENODEV;

        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
        deve = se_cmd->se_deve;

        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_tmr->tmr_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08x\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                return -ENODEV;
        }

        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;
        se_tmr->tmr_dev = se_lun->lun_se_dev;

        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
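
/*
 * Illustrative sketch (not from this file): fabric drivers typically call the
 * exported lookup helpers above once a command or TMR arrives, before handing
 * the se_cmd off for execution.  Roughly:
 *
 *        sense_reason_t rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *        if (rc) {
 *                transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *                return;
 *        }
 *
 * The exact error path depends on the fabric; the above only shows the common
 * pattern of turning the returned sense_reason_t into a CHECK_CONDITION.
 */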

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_port *port;
        struct se_portal_group *tpg = nacl->se_tpg;
        u32 i;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                lun = deve->se_lun;
                if (!lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                port = lun->lun_sep;
                if (!port) {
                        pr_err("%s device entries port pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                if (port->sep_rtpi != rtpi)
                        continue;

                atomic_inc(&deve->pr_ref_count);
                smp_mb__after_atomic_inc();
                spin_unlock_irq(&nacl->device_list_lock);

                return deve;
        }
        spin_unlock_irq(&nacl->device_list_lock);

        return NULL;
}

int core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        u32 i;

        if (!nacl->device_list)
                return 0;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                lun = deve->se_lun;

                spin_unlock_irq(&nacl->device_list_lock);
                core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);

        array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
        nacl->device_list = NULL;

        return 0;
}

void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);
        deve = nacl->device_list[mapped_lun];
        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*      core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);

        deve = nacl->device_list[mapped_lun];

        /*
         * Check if the call is handling demo mode -> explicit LUN ACL
         * transition.  This transition must be for the same struct se_lun
         * + mapped_lun that was setup in demo mode.
         */
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                if (deve->se_lun_acl != NULL) {
                        pr_err("struct se_dev_entry->se_lun_acl"
                                " already set for demo mode -> explicit"
                                " LUN ACL transition\n");
                        spin_unlock_irq(&nacl->device_list_lock);
                        return -EINVAL;
                }
                if (deve->se_lun != lun) {
                        pr_err("struct se_dev_entry->se_lun does"
                                " not match passed struct se_lun for demo mode"
                                " -> explicit LUN ACL transition\n");
                        spin_unlock_irq(&nacl->device_list_lock);
                        return -EINVAL;
                }
                deve->se_lun_acl = lun_acl;

                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
                }

                spin_unlock_irq(&nacl->device_list_lock);
                return 0;
        }

        deve->se_lun = lun;
        deve->se_lun_acl = lun_acl;
        deve->mapped_lun = mapped_lun;
        deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }

        deve->creation_time = get_jiffies_64();
        deve->attach_count++;
        spin_unlock_irq(&nacl->device_list_lock);

        spin_lock_bh(&port->sep_alua_lock);
        list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
        spin_unlock_bh(&port->sep_alua_lock);

        return 0;
}

/*      core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve = nacl->device_list[mapped_lun];

        /*
         * If the MappedLUN entry is being disabled, the entry in
         * port->sep_alua_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->alua_port_list from
         * port->sep_alua_list.  This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below.
         */
        spin_lock_bh(&port->sep_alua_lock);
        list_del(&deve->alua_port_list);
        spin_unlock_bh(&port->sep_alua_lock);
        /*
         * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
         * PR operation to complete.
         */
        while (atomic_read(&deve->pr_ref_count) != 0)
                cpu_relax();

        spin_lock_irq(&nacl->device_list_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(deve);
        deve->se_lun = NULL;
        deve->se_lun_acl = NULL;
        deve->lun_flags = 0;
        deve->creation_time = 0;
        deve->attach_count--;
        spin_unlock_irq(&nacl->device_list_lock);

        core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
        return 0;
}

/*      core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;
        u32 i;

        spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
                spin_unlock_irq(&tpg->acl_node_lock);

                spin_lock_irq(&nacl->device_list_lock);
                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                        deve = nacl->device_list[i];
                        if (lun != deve->se_lun)
                                continue;
                        spin_unlock_irq(&nacl->device_list_lock);

                        core_disable_device_list_for_node(lun, NULL,
                                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
                                nacl, tpg);

                        spin_lock_irq(&nacl->device_list_lock);
                }
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_irq(&tpg->acl_node_lock);
        }
        spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
        struct se_port *port, *port_tmp;

        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
        if (!port) {
                pr_err("Unable to allocate struct se_port\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&port->sep_alua_list);
        INIT_LIST_HEAD(&port->sep_list);
        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
        spin_lock_init(&port->sep_alua_lock);
        mutex_init(&port->sep_tg_pt_md_mutex);

        spin_lock(&dev->se_port_lock);
        if (dev->dev_port_count == 0x0000ffff) {
                pr_warn("Reached dev->dev_port_count =="
                        " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return ERR_PTR(-ENOSPC);
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code           Description
         * 0h             Reserved
         * 1h             Relative port 1, historically known as port A
         * 2h             Relative port 2, historically known as port B
         * 3h to FFFFh    Relative port 3 through 65 535
         */
        port->sep_rtpi = dev->dev_rpti_counter++;
        if (!port->sep_rtpi)
                goto again;

        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap.
                 */
                if (port->sep_rtpi == port_tmp->sep_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return port;
}

static void core_export_port(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_port *port,
        struct se_lun *lun)
{
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

        spin_lock(&dev->se_port_lock);
        spin_lock(&lun->lun_sep_lock);
        port->sep_tpg = tpg;
        port->sep_lun = lun;
        lun->lun_sep = port;
        spin_unlock(&lun->lun_sep_lock);

        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                        pr_err("Unable to allocate t10_alua_tg_pt"
                                "_gp_member_t\n");
                        return;
                }
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
                        dev->t10_alua.default_tg_pt_gp);
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_debug("%s/%s: Adding to default ALUA Target Port"
                        " Group: alua/default_tg_pt_gp\n",
                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
        }

        dev->dev_port_count++;
        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
        __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
        /*
         * Wait for any port reference for PR ALL_TG_PT=1 operation
         * to complete in __core_scsi3_alloc_registration()
         */
        spin_unlock(&dev->se_port_lock);
        if (atomic_read(&port->sep_tg_pt_ref_cnt))
                cpu_relax();
        spin_lock(&dev->se_port_lock);

        core_alua_free_tg_pt_gp_mem(port);

        list_del(&port->sep_list);
        dev->dev_port_count--;
        kfree(port);
}

int core_dev_export(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_hba *hba = dev->se_hba;
        struct se_port *port;

        port = core_alloc_port(dev);
        if (IS_ERR(port))
                return PTR_ERR(port);

        lun->lun_se_dev = dev;

        spin_lock(&hba->device_lock);
        dev->export_count++;
        spin_unlock(&hba->device_lock);

        core_export_port(dev, tpg, port, lun);
        return 0;
}

void core_dev_unexport(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_hba *hba = dev->se_hba;
        struct se_port *port = lun->lun_sep;

        spin_lock(&lun->lun_sep_lock);
        if (lun->lun_se_dev == NULL) {
                spin_unlock(&lun->lun_sep_lock);
                return;
        }
        spin_unlock(&lun->lun_sep_lock);

        spin_lock(&dev->se_port_lock);
        core_release_port(dev, port);
        spin_unlock(&dev->se_port_lock);

        spin_lock(&hba->device_lock);
        dev->export_count--;
        spin_unlock(&hba->device_lock);

        lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 aligned_max_sectors;
        u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        alignment = max(1ul, PAGE_SIZE / block_size);
        aligned_max_sectors = rounddown(max_sectors, alignment);

        if (max_sectors != aligned_max_sectors)
                pr_info("Rounding down aligned max_sectors from %u to %u\n",
                        max_sectors, aligned_max_sectors);

        return aligned_max_sectors;
}
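
/*
 * Worked example (illustrative only): with 4096-byte pages and a 512-byte
 * block_size, alignment above becomes 4096 / 512 = 8 sectors, so a requested
 * max_sectors of 1023 is rounded down to 1016 by se_dev_align_max_sectors().
 * For block_size >= PAGE_SIZE the max(1ul, ...) clamp keeps the alignment at
 * one sector, i.e. no rounding takes place.
 */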

int se_dev_set_max_unmap_lba_count(
        struct se_device *dev,
        u32 max_unmap_lba_count)
{
        dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
        pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
                dev, dev->dev_attrib.max_unmap_lba_count);
        return 0;
}

int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
        u32 max_unmap_block_desc_count)
{
        dev->dev_attrib.max_unmap_block_desc_count =
                max_unmap_block_desc_count;
        pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
                dev, dev->dev_attrib.max_unmap_block_desc_count);
        return 0;
}

int se_dev_set_unmap_granularity(
        struct se_device *dev,
        u32 unmap_granularity)
{
        dev->dev_attrib.unmap_granularity = unmap_granularity;
        pr_debug("dev[%p]: Set unmap_granularity: %u\n",
                dev, dev->dev_attrib.unmap_granularity);
        return 0;
}

int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
        u32 unmap_granularity_alignment)
{
        dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
        pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
                dev, dev->dev_attrib.unmap_granularity_alignment);
        return 0;
}

int se_dev_set_max_write_same_len(
        struct se_device *dev,
        u32 max_write_same_len)
{
        dev->dev_attrib.max_write_same_len = max_write_same_len;
        pr_debug("dev[%p]: Set max_write_same_len: %u\n",
                dev, dev->dev_attrib.max_write_same_len);
        return 0;
}

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
        const char *configname;

        configname = config_item_name(&dev->dev_group.cg_item);
        if (strlen(configname) >= 16) {
                pr_warn("dev[%p]: Backstore name '%s' is too long for "
                        "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
                        configname);
        }
        snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change model alias"
                        " while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }

        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag) {
                dev_set_t10_wwn_model_alias(dev);
        } else {
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
        }
        dev->dev_attrib.emulate_model_alias = flag;

        return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag) {
                pr_err("dpo_emulated not supported\n");
                return -EINVAL;
        }

        return 0;
}
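
/*
 * Note (sketch, not part of the original source): the se_dev_set_*() attribute
 * setters in this file are normally reached from configfs attribute stores in
 * target_core_configfs.c rather than being called directly.  From userspace
 * that typically looks like (paths may vary by backend and setup):
 *
 *        echo 1 > /sys/kernel/config/target/core/iblock_0/my_disk/attrib/emulate_fua_write
 *
 * where "iblock_0" and "my_disk" are hypothetical HBA and device names.
 */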

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag &&
            dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("emulate_fua_write not supported for pSCSI\n");
                return -EINVAL;
        }
        dev->dev_attrib.emulate_fua_write = flag;
        pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
                dev, dev->dev_attrib.emulate_fua_write);
        return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag) {
                pr_err("fua read emulated not supported\n");
                return -EINVAL;
        }

        return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        if (flag &&
            dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("emulate_write_cache not supported for pSCSI\n");
                return -EINVAL;
        }
        if (dev->transport->get_write_cache) {
                pr_warn("emulate_write_cache cannot be changed when underlying"
                        " HW reports WriteCacheEnabled, ignoring request\n");
                return 0;
        }

        dev->dev_attrib.emulate_write_cache = flag;
        pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
                dev, dev->dev_attrib.emulate_write_cache);
        return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1) && (flag != 2)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " UA_INTRLCK_CTRL while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
        pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
                dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

        return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device TAS while"
                        " export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        dev->dev_attrib.emulate_tas = flag;
        pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
                dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

        return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }

        dev->dev_attrib.emulate_tpu = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                dev, flag);
        return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
        if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }

        dev->dev_attrib.emulate_tpws = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                dev, flag);
        return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        dev->dev_attrib.enforce_pr_isids = flag;
        pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
                (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
        return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        dev->dev_attrib.is_nonrot = flag;
        pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
                dev, flag);
        return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
        if (flag != 0) {
                pr_err("dev[%p]: SE Device emulation of restricted"
                        " reordering not implemented\n", dev);
                return -ENOSYS;
        }
        dev->dev_attrib.emulate_rest_reord = flag;
        pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
        return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device TCQ while"
                        " export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        if (!queue_depth) {
                pr_err("dev[%p]: Illegal ZERO value for queue"
                        "_depth\n", dev);
                return -EINVAL;
        }

        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (queue_depth > dev->dev_attrib.hw_queue_depth) {
                        pr_err("dev[%p]: Passed queue_depth: %u"
                                " exceeds TCM/SE_Device TCQ: %u\n",
                                dev, queue_depth,
                                dev->dev_attrib.hw_queue_depth);
                        return -EINVAL;
                }
        } else {
                if (queue_depth > dev->dev_attrib.queue_depth) {
                        if (queue_depth > dev->dev_attrib.hw_queue_depth) {
                                pr_err("dev[%p]: Passed queue_depth:"
                                        " %u exceeds TCM/SE_Device MAX"
                                        " TCQ: %u\n", dev, queue_depth,
                                        dev->dev_attrib.hw_queue_depth);
                                return -EINVAL;
                        }
                }
        }

        dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                dev, queue_depth);
        return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
        int block_size = dev->dev_attrib.block_size;

        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " fabric_max_sectors while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        if (!fabric_max_sectors) {
                pr_err("dev[%p]: Illegal ZERO value for"
                        " fabric_max_sectors\n", dev);
                return -EINVAL;
        }
        if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
                pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
                        " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
                        DA_STATUS_MAX_SECTORS_MIN);
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
                        pr_err("dev[%p]: Passed fabric_max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors:"
                                " %u\n", dev, fabric_max_sectors,
                                dev->dev_attrib.hw_max_sectors);
                        return -EINVAL;
                }
        } else {
                if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
                        pr_err("dev[%p]: Passed fabric_max_sectors: %u"
                                " greater than DA_STATUS_MAX_SECTORS_MAX:"
                                " %u\n", dev, fabric_max_sectors,
                                DA_STATUS_MAX_SECTORS_MAX);
                        return -EINVAL;
                }
        }
        /*
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
         */
        if (!block_size) {
                block_size = 512;
                pr_warn("Defaulting to 512 for zero block_size\n");
        }
        fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
                block_size);

        dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
                dev, fabric_max_sectors);
        return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " optimal_sectors while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("dev[%p]: Passed optimal_sectors cannot be"
                        " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
        if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
                        " greater than fabric_max_sectors: %u\n", dev,
                        optimal_sectors, dev->dev_attrib.fabric_max_sectors);
                return -EINVAL;
        }

        dev->dev_attrib.optimal_sectors = optimal_sectors;
        pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
                dev, optimal_sectors);
        return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device block_size"
                        " while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }

        if ((block_size != 512) &&
            (block_size != 1024) &&
            (block_size != 2048) &&
            (block_size != 4096)) {
                pr_err("dev[%p]: Illegal value for block_size: %u"
                        " for SE device, must be 512, 1024, 2048 or 4096\n",
                        dev, block_size);
                return -EINVAL;
        }

        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("dev[%p]: Not allowed to change block_size for"
                        " Physical Device, use the Linux/SCSI layer to change"
                        " the block_size of the underlying hardware\n", dev);
                return -EINVAL;
        }

        dev->dev_attrib.block_size = block_size;
        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
                dev, block_size);
        return 0;
}
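
/*
 * Illustrative note (not from the original source): core_dev_add_lun() is
 * normally reached when userspace creates a LUN under a fabric TPG in
 * configfs and links it to a backstore device, roughly:
 *
 *        mkdir /sys/kernel/config/target/iscsi/$WWN/tpgt_1/lun/lun_0
 *        ln -s /sys/kernel/config/target/core/iblock_0/my_disk \
 *              /sys/kernel/config/target/iscsi/$WWN/tpgt_1/lun/lun_0/
 *
 * The "iscsi", "iblock_0" and "my_disk" names above are examples only; the
 * symlink is what triggers the fabric's port_link callback and, in turn, the
 * LUN activation handled below.
 */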

struct se_lun *core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
        u32 lun)
{
        struct se_lun *lun_p;
        int rc;

        lun_p = core_tpg_pre_addlun(tpg, lun);
        if (IS_ERR(lun_p))
                return lun_p;

        rc = core_tpg_post_addlun(tpg, lun_p,
                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
        if (rc < 0)
                return ERR_PTR(rc);

        pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;
                spin_lock_irq(&tpg->acl_node_lock);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                                spin_unlock_irq(&tpg->acl_node_lock);
                                core_tpg_add_node_to_devs(acl, tpg);
                                spin_lock_irq(&tpg->acl_node_lock);
                        }
                }
                spin_unlock_irq(&tpg->acl_node_lock);
        }

        return lun_p;
}

/*      core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        lun = core_tpg_pre_dellun(tpg, unpacked_lun);
        if (IS_ERR(lun))
                return PTR_ERR(lun);

        core_tpg_post_dellun(tpg, lun);

        pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name());

        return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
                        "_PER_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
                pr_err("%s Logical Unit Number: %u is not free on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

/*      core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
                        "_TPG-1: %u for Target Portal Group: %hu\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        lun = tpg->tpg_lun_list[unpacked_lun];

        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return NULL;
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}
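
/*
 * Illustrative note (not from the original source): the per-initiator
 * MappedLUN helpers below back the configfs ACL layout, where userspace
 * creates a lun_N directory under an initiator ACL and symlinks it to a TPG
 * LUN, e.g. (names are examples only):
 *
 *        /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/acls/$INITIATOR/lun_0
 *
 * That path eventually calls core_dev_init_initiator_node_lun_acl() and
 * core_dev_add_initiator_node_lun_acl() with the requested mapped_lun.
 */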

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *nacl,
        u32 mapped_lun,
        int *ret)
{
        struct se_lun_acl *lacl;

        if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
        snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
                nacl->initiatorname);

        return lacl;
}

int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        u32 unpacked_lun,
        u32 lun_access)
{
        struct se_lun *lun;
        struct se_node_acl *nacl;

        lun = core_dev_get_lun(tpg, unpacked_lun);
        if (!lun) {
                pr_err("%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %hu, ignoring request.\n",
                        tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
                        tpg->se_tpg_tfo->tpg_get_tag(tpg));
                return -EINVAL;
        }

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

        lacl->se_lun = lun;

        if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access, nacl, tpg) < 0)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
        atomic_inc(&lun->lun_acl_count);
        smp_mb__after_atomic_inc();
        spin_unlock(&lun->lun_acl_lock);

        pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
                lacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL.
         */
        core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
        return 0;
}

/*      core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_node_acl *nacl;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        spin_lock(&lun->lun_acl_lock);
        list_del(&lacl->lacl_list);
        atomic_dec(&lun->lun_acl_count);
        smp_mb__after_atomic_dec();
        spin_unlock(&lun->lun_acl_lock);

        core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
                TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

        lacl->se_lun = NULL;

        pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %u\n",
                tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                lacl->initiatorname, lacl->mapped_lun);

        return 0;
}

void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->get_fabric_name(),
                lacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->t10_wwn;
        char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
                        buf[i] = wwn->vendor[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Vendor: %s\n", buf);

        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
                        buf[i] = wwn->model[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Model: %s\n", buf);

        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
                        buf[i] = wwn->revision[i];
                else
                        buf[i] = ' ';
        buf[i] = '\0';
        pr_debug("  Revision: %s\n", buf);

        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
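
/*
 * Lifecycle sketch (descriptive, not from the original source): configfs
 * creates a backstore device in two steps.  target_alloc_device() asks the
 * backend for an unconfigured se_device and fills in the generic defaults;
 * the device only becomes usable once userspace sets its parameters and
 * enables it, at which point target_configure_device() runs the backend's
 * configure_device() callback, inherits the hw_* limits, and registers the
 * device with ALUA.  target_free_device() is the teardown counterpart.
 */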

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
        struct se_device *dev;

        dev = hba->transport->alloc_device(hba, name);
        if (!dev)
                return NULL;

        dev->dev_link_magic = SE_DEV_LINK_MAGIC;
        dev->se_hba = hba;
        dev->transport = hba->transport;

        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->state_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->stats_lock);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        atomic_set(&dev->dev_ordered_id, 0);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&dev->t10_pr.registration_lock);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);

        dev->t10_wwn.t10_dev = dev;
        dev->t10_alua.t10_dev = dev;

        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
        dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
        dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
        dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->dev_attrib.unmap_granularity_alignment =
                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
        dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
        dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

        return dev;
}

int target_configure_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        int ret;

        if (dev->dev_flags & DF_CONFIGURED) {
                pr_err("se_dev->se_dev_ptr already set for storage"
                        " object\n");
                return -EEXIST;
        }

        ret = dev->transport->configure_device(dev);
        if (ret)
                goto out;
        dev->dev_flags |= DF_CONFIGURED;

        /*
         * XXX: there is not much point to have two different values here..
         */
        dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
        dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

        /*
         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
         */
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                        dev->dev_attrib.hw_block_size);

        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();

        ret = core_setup_alua(dev);
        if (ret)
                goto out;

        /*
         * Startup the struct se_device processing thread
         */
        dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
                        dev->transport->name);
        if (!dev->tmr_wq) {
                pr_err("Unable to create tmr workqueue for %s\n",
                        dev->transport->name);
                ret = -ENOMEM;
                goto out_free_alua;
        }

        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

        /*
         * Preload the initial INQUIRY const values if we are doing
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
                strncpy(&dev->t10_wwn.revision[0],
                        dev->transport->inquiry_rev, 4);
        }

        scsi_dump_inquiry(dev);

        spin_lock(&hba->device_lock);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);
        return 0;

out_free_alua:
        core_alua_free_lu_gp_mem(dev);
out:
        se_release_vpd_for_dev(dev);
        return ret;
}

void target_free_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        WARN_ON(!list_empty(&dev->dev_sep_list));

        if (dev->dev_flags & DF_CONFIGURED) {
                destroy_workqueue(dev->tmr_wq);

                spin_lock(&hba->device_lock);
                hba->dev_count--;
                spin_unlock(&hba->device_lock);
        }

        core_alua_free_lu_gp_mem(dev);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        char buf[] = "rd_pages=8,rd_nullio=1";
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        dev = target_alloc_device(hba, "virt_lun0");
        if (!dev) {
                ret = -ENOMEM;
                goto out_free_hba;
        }

        hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

        ret = target_configure_device(dev);
        if (ret)
                goto out_free_se_dev;

        lun0_hba = hba;
        g_lun0_dev = dev;
        return 0;

out_free_se_dev:
        target_free_device(dev);
out_free_hba:
        core_delete_hba(hba);
        return ret;
}

void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;

        if (!hba)
                return;

        if (g_lun0_dev)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
}