1 /******************************************************************************* 2 * Filename: target_core_device.c (based on iscsi_target_device.c) 3 * 4 * This file contains the TCM Virtual Device and Disk Transport 5 * agnostic related functions. 6 * 7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 8 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. 9 * Copyright (c) 2007-2010 Rising Tide Systems 10 * Copyright (c) 2008-2010 Linux-iSCSI.org 11 * 12 * Nicholas A. Bellinger <nab@kernel.org> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or 17 * (at your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 27 * 28 ******************************************************************************/ 29 30 #include <linux/net.h> 31 #include <linux/string.h> 32 #include <linux/delay.h> 33 #include <linux/timer.h> 34 #include <linux/slab.h> 35 #include <linux/spinlock.h> 36 #include <linux/kthread.h> 37 #include <linux/in.h> 38 #include <linux/export.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_device.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 48 #include "target_core_internal.h" 49 #include "target_core_alua.h" 50 #include "target_core_pr.h" 51 #include "target_core_ua.h" 52 53 static void se_dev_start(struct se_device *dev); 54 static void se_dev_stop(struct se_device *dev); 55 56 static struct se_hba *lun0_hba; 57 static struct se_subsystem_dev *lun0_su_dev; 58 /* not static, needed by tpg.c */ 59 struct se_device *g_lun0_dev; 60 61 int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) 62 { 63 struct se_lun *se_lun = NULL; 64 struct se_session *se_sess = se_cmd->se_sess; 65 struct se_device *dev; 66 unsigned long flags; 67 68 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { 69 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 70 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 71 return -ENODEV; 72 } 73 74 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); 75 se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; 76 if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 77 struct se_dev_entry *deve = se_cmd->se_deve; 78 79 deve->total_cmds++; 80 deve->total_bytes += se_cmd->data_length; 81 82 if ((se_cmd->data_direction == DMA_TO_DEVICE) && 83 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 84 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 85 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 86 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 87 " Access for 0x%08x\n", 88 se_cmd->se_tfo->get_fabric_name(), 89 unpacked_lun); 90 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 91 return -EACCES; 92 } 93 94 if (se_cmd->data_direction == DMA_TO_DEVICE) 95 deve->write_bytes += se_cmd->data_length; 96 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 97 deve->read_bytes += se_cmd->data_length; 98 99 deve->deve_cmds++; 100 101 se_lun = deve->se_lun; 102 se_cmd->se_lun = deve->se_lun; 103 se_cmd->pr_res_key = deve->pr_res_key; 104 se_cmd->orig_fe_lun = unpacked_lun; 105 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 106 } 107 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 108 109 if (!se_lun) { 110 /* 111 * Use the se_portal_group->tpg_virt_lun0 to allow for 112 * REPORT_LUNS, et al to be returned when no active 113 * MappedLUN=0 exists for this Initiator Port. 114 */ 115 if (unpacked_lun != 0) { 116 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 117 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 118 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 119 " Access for 0x%08x\n", 120 se_cmd->se_tfo->get_fabric_name(), 121 unpacked_lun); 122 return -ENODEV; 123 } 124 /* 125 * Force WRITE PROTECT for virtual LUN 0 126 */ 127 if ((se_cmd->data_direction != DMA_FROM_DEVICE) && 128 (se_cmd->data_direction != DMA_NONE)) { 129 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 130 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 131 return -EACCES; 132 } 133 134 se_lun = &se_sess->se_tpg->tpg_virt_lun0; 135 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 136 se_cmd->orig_fe_lun = 0; 137 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 138 } 139 /* 140 * Determine if the struct se_lun is online. 141 * FIXME: Check for LUN_RESET + UNIT Attention 142 */ 143 if (se_dev_check_online(se_lun->lun_se_dev) != 0) { 144 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 145 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 146 return -ENODEV; 147 } 148 149 /* Directly associate cmd with se_dev */ 150 se_cmd->se_dev = se_lun->lun_se_dev; 151 152 /* TODO: get rid of this and use atomics for stats */ 153 dev = se_lun->lun_se_dev; 154 spin_lock_irqsave(&dev->stats_lock, flags); 155 dev->num_cmds++; 156 if (se_cmd->data_direction == DMA_TO_DEVICE) 157 dev->write_bytes += se_cmd->data_length; 158 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 159 dev->read_bytes += se_cmd->data_length; 160 spin_unlock_irqrestore(&dev->stats_lock, flags); 161 162 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); 163 list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); 164 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); 165 166 return 0; 167 } 168 EXPORT_SYMBOL(transport_lookup_cmd_lun); 169 170 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) 171 { 172 struct se_dev_entry *deve; 173 struct se_lun *se_lun = NULL; 174 struct se_session *se_sess = se_cmd->se_sess; 175 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 176 unsigned long flags; 177 178 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { 179 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 180 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 181 return -ENODEV; 182 } 183 184 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); 185 se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun]; 186 deve = se_cmd->se_deve; 187 188 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 189 se_tmr->tmr_lun = deve->se_lun; 190 se_cmd->se_lun = deve->se_lun; 191 se_lun = deve->se_lun; 192 se_cmd->pr_res_key = deve->pr_res_key; 193 se_cmd->orig_fe_lun = unpacked_lun; 194 } 195 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 196 197 if (!se_lun) { 198 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 199 " Access for 0x%08x\n", 200 se_cmd->se_tfo->get_fabric_name(), 201 unpacked_lun); 202 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 203 return -ENODEV; 204 } 205 /* 206 * Determine if the struct se_lun is online. 207 * FIXME: Check for LUN_RESET + UNIT Attention 208 */ 209 if (se_dev_check_online(se_lun->lun_se_dev) != 0) { 210 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 211 return -ENODEV; 212 } 213 214 /* Directly associate cmd with se_dev */ 215 se_cmd->se_dev = se_lun->lun_se_dev; 216 se_tmr->tmr_dev = se_lun->lun_se_dev; 217 218 spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); 219 list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); 220 spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); 221 222 return 0; 223 } 224 EXPORT_SYMBOL(transport_lookup_tmr_lun); 225 226 /* 227 * This function is called from core_scsi3_emulate_pro_register_and_move() 228 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count 229 * when a matching rtpi is found. 230 */ 231 struct se_dev_entry *core_get_se_deve_from_rtpi( 232 struct se_node_acl *nacl, 233 u16 rtpi) 234 { 235 struct se_dev_entry *deve; 236 struct se_lun *lun; 237 struct se_port *port; 238 struct se_portal_group *tpg = nacl->se_tpg; 239 u32 i; 240 241 spin_lock_irq(&nacl->device_list_lock); 242 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 243 deve = nacl->device_list[i]; 244 245 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 246 continue; 247 248 lun = deve->se_lun; 249 if (!lun) { 250 pr_err("%s device entries device pointer is" 251 " NULL, but Initiator has access.\n", 252 tpg->se_tpg_tfo->get_fabric_name()); 253 continue; 254 } 255 port = lun->lun_sep; 256 if (!port) { 257 pr_err("%s device entries device pointer is" 258 " NULL, but Initiator has access.\n", 259 tpg->se_tpg_tfo->get_fabric_name()); 260 continue; 261 } 262 if (port->sep_rtpi != rtpi) 263 continue; 264 265 atomic_inc(&deve->pr_ref_count); 266 smp_mb__after_atomic_inc(); 267 spin_unlock_irq(&nacl->device_list_lock); 268 269 return deve; 270 } 271 spin_unlock_irq(&nacl->device_list_lock); 272 273 return NULL; 274 } 275 276 int core_free_device_list_for_node( 277 struct se_node_acl *nacl, 278 struct se_portal_group *tpg) 279 { 280 struct se_dev_entry *deve; 281 struct se_lun *lun; 282 u32 i; 283 284 if (!nacl->device_list) 285 return 0; 286 287 spin_lock_irq(&nacl->device_list_lock); 288 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 289 deve = nacl->device_list[i]; 290 291 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 292 continue; 293 294 if (!deve->se_lun) { 295 pr_err("%s device entries device pointer is" 296 " NULL, but Initiator has access.\n", 297 tpg->se_tpg_tfo->get_fabric_name()); 298 continue; 299 } 300 lun = deve->se_lun; 301 302 spin_unlock_irq(&nacl->device_list_lock); 303 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 304 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 305 spin_lock_irq(&nacl->device_list_lock); 306 } 307 spin_unlock_irq(&nacl->device_list_lock); 308 309 array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG); 310 nacl->device_list = NULL; 311 312 return 0; 313 } 314 315 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) 316 { 317 struct se_dev_entry *deve; 318 unsigned long flags; 319 320 spin_lock_irqsave(&se_nacl->device_list_lock, flags); 321 deve = se_nacl->device_list[se_cmd->orig_fe_lun]; 322 deve->deve_cmds--; 323 spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); 324 } 325 326 void core_update_device_list_access( 327 u32 mapped_lun, 328 u32 lun_access, 329 struct se_node_acl *nacl) 330 { 331 struct se_dev_entry *deve; 332 333 spin_lock_irq(&nacl->device_list_lock); 334 deve = nacl->device_list[mapped_lun]; 335 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { 336 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; 337 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; 338 } else { 339 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; 340 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; 341 } 342 spin_unlock_irq(&nacl->device_list_lock); 343 } 344 345 /* core_update_device_list_for_node(): 346 * 347 * 348 */ 349 int core_update_device_list_for_node( 350 struct se_lun *lun, 351 struct se_lun_acl *lun_acl, 352 u32 mapped_lun, 353 u32 lun_access, 354 struct se_node_acl *nacl, 355 struct se_portal_group *tpg, 356 int enable) 357 { 358 struct se_port *port = lun->lun_sep; 359 struct se_dev_entry *deve = nacl->device_list[mapped_lun]; 360 int trans = 0; 361 /* 362 * If the MappedLUN entry is being disabled, the entry in 363 * port->sep_alua_list must be removed now before clearing the 364 * struct se_dev_entry pointers below as logic in 365 * core_alua_do_transition_tg_pt() depends on these being present. 366 */ 367 if (!enable) { 368 /* 369 * deve->se_lun_acl will be NULL for demo-mode created LUNs 370 * that have not been explicitly concerted to MappedLUNs -> 371 * struct se_lun_acl, but we remove deve->alua_port_list from 372 * port->sep_alua_list. This also means that active UAs and 373 * NodeACL context specific PR metadata for demo-mode 374 * MappedLUN *deve will be released below.. 375 */ 376 spin_lock_bh(&port->sep_alua_lock); 377 list_del(&deve->alua_port_list); 378 spin_unlock_bh(&port->sep_alua_lock); 379 } 380 381 spin_lock_irq(&nacl->device_list_lock); 382 if (enable) { 383 /* 384 * Check if the call is handling demo mode -> explict LUN ACL 385 * transition. This transition must be for the same struct se_lun 386 * + mapped_lun that was setup in demo mode.. 387 */ 388 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 389 if (deve->se_lun_acl != NULL) { 390 pr_err("struct se_dev_entry->se_lun_acl" 391 " already set for demo mode -> explict" 392 " LUN ACL transition\n"); 393 spin_unlock_irq(&nacl->device_list_lock); 394 return -EINVAL; 395 } 396 if (deve->se_lun != lun) { 397 pr_err("struct se_dev_entry->se_lun does" 398 " match passed struct se_lun for demo mode" 399 " -> explict LUN ACL transition\n"); 400 spin_unlock_irq(&nacl->device_list_lock); 401 return -EINVAL; 402 } 403 deve->se_lun_acl = lun_acl; 404 trans = 1; 405 } else { 406 deve->se_lun = lun; 407 deve->se_lun_acl = lun_acl; 408 deve->mapped_lun = mapped_lun; 409 deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; 410 } 411 412 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { 413 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; 414 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; 415 } else { 416 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; 417 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; 418 } 419 420 if (trans) { 421 spin_unlock_irq(&nacl->device_list_lock); 422 return 0; 423 } 424 deve->creation_time = get_jiffies_64(); 425 deve->attach_count++; 426 spin_unlock_irq(&nacl->device_list_lock); 427 428 spin_lock_bh(&port->sep_alua_lock); 429 list_add_tail(&deve->alua_port_list, &port->sep_alua_list); 430 spin_unlock_bh(&port->sep_alua_lock); 431 432 return 0; 433 } 434 /* 435 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE 436 * PR operation to complete. 437 */ 438 spin_unlock_irq(&nacl->device_list_lock); 439 while (atomic_read(&deve->pr_ref_count) != 0) 440 cpu_relax(); 441 spin_lock_irq(&nacl->device_list_lock); 442 /* 443 * Disable struct se_dev_entry LUN ACL mapping 444 */ 445 core_scsi3_ua_release_all(deve); 446 deve->se_lun = NULL; 447 deve->se_lun_acl = NULL; 448 deve->lun_flags = 0; 449 deve->creation_time = 0; 450 deve->attach_count--; 451 spin_unlock_irq(&nacl->device_list_lock); 452 453 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); 454 return 0; 455 } 456 457 /* core_clear_lun_from_tpg(): 458 * 459 * 460 */ 461 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) 462 { 463 struct se_node_acl *nacl; 464 struct se_dev_entry *deve; 465 u32 i; 466 467 spin_lock_irq(&tpg->acl_node_lock); 468 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 469 spin_unlock_irq(&tpg->acl_node_lock); 470 471 spin_lock_irq(&nacl->device_list_lock); 472 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 473 deve = nacl->device_list[i]; 474 if (lun != deve->se_lun) 475 continue; 476 spin_unlock_irq(&nacl->device_list_lock); 477 478 core_update_device_list_for_node(lun, NULL, 479 deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, 480 nacl, tpg, 0); 481 482 spin_lock_irq(&nacl->device_list_lock); 483 } 484 spin_unlock_irq(&nacl->device_list_lock); 485 486 spin_lock_irq(&tpg->acl_node_lock); 487 } 488 spin_unlock_irq(&tpg->acl_node_lock); 489 } 490 491 static struct se_port *core_alloc_port(struct se_device *dev) 492 { 493 struct se_port *port, *port_tmp; 494 495 port = kzalloc(sizeof(struct se_port), GFP_KERNEL); 496 if (!port) { 497 pr_err("Unable to allocate struct se_port\n"); 498 return ERR_PTR(-ENOMEM); 499 } 500 INIT_LIST_HEAD(&port->sep_alua_list); 501 INIT_LIST_HEAD(&port->sep_list); 502 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 503 spin_lock_init(&port->sep_alua_lock); 504 mutex_init(&port->sep_tg_pt_md_mutex); 505 506 spin_lock(&dev->se_port_lock); 507 if (dev->dev_port_count == 0x0000ffff) { 508 pr_warn("Reached dev->dev_port_count ==" 509 " 0x0000ffff\n"); 510 spin_unlock(&dev->se_port_lock); 511 return ERR_PTR(-ENOSPC); 512 } 513 again: 514 /* 515 * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device 516 * Here is the table from spc4r17 section 7.7.3.8. 517 * 518 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field 519 * 520 * Code Description 521 * 0h Reserved 522 * 1h Relative port 1, historically known as port A 523 * 2h Relative port 2, historically known as port B 524 * 3h to FFFFh Relative port 3 through 65 535 525 */ 526 port->sep_rtpi = dev->dev_rpti_counter++; 527 if (!port->sep_rtpi) 528 goto again; 529 530 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { 531 /* 532 * Make sure RELATIVE TARGET PORT IDENTIFER is unique 533 * for 16-bit wrap.. 534 */ 535 if (port->sep_rtpi == port_tmp->sep_rtpi) 536 goto again; 537 } 538 spin_unlock(&dev->se_port_lock); 539 540 return port; 541 } 542 543 static void core_export_port( 544 struct se_device *dev, 545 struct se_portal_group *tpg, 546 struct se_port *port, 547 struct se_lun *lun) 548 { 549 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 550 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; 551 552 spin_lock(&dev->se_port_lock); 553 spin_lock(&lun->lun_sep_lock); 554 port->sep_tpg = tpg; 555 port->sep_lun = lun; 556 lun->lun_sep = port; 557 spin_unlock(&lun->lun_sep_lock); 558 559 list_add_tail(&port->sep_list, &dev->dev_sep_list); 560 spin_unlock(&dev->se_port_lock); 561 562 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 563 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 564 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 565 pr_err("Unable to allocate t10_alua_tg_pt" 566 "_gp_member_t\n"); 567 return; 568 } 569 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 570 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 571 su_dev->t10_alua.default_tg_pt_gp); 572 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 573 pr_debug("%s/%s: Adding to default ALUA Target Port" 574 " Group: alua/default_tg_pt_gp\n", 575 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); 576 } 577 578 dev->dev_port_count++; 579 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ 580 } 581 582 /* 583 * Called with struct se_device->se_port_lock spinlock held. 584 */ 585 static void core_release_port(struct se_device *dev, struct se_port *port) 586 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) 587 { 588 /* 589 * Wait for any port reference for PR ALL_TG_PT=1 operation 590 * to complete in __core_scsi3_alloc_registration() 591 */ 592 spin_unlock(&dev->se_port_lock); 593 if (atomic_read(&port->sep_tg_pt_ref_cnt)) 594 cpu_relax(); 595 spin_lock(&dev->se_port_lock); 596 597 core_alua_free_tg_pt_gp_mem(port); 598 599 list_del(&port->sep_list); 600 dev->dev_port_count--; 601 kfree(port); 602 } 603 604 int core_dev_export( 605 struct se_device *dev, 606 struct se_portal_group *tpg, 607 struct se_lun *lun) 608 { 609 struct se_port *port; 610 611 port = core_alloc_port(dev); 612 if (IS_ERR(port)) 613 return PTR_ERR(port); 614 615 lun->lun_se_dev = dev; 616 se_dev_start(dev); 617 618 atomic_inc(&dev->dev_export_obj.obj_access_count); 619 core_export_port(dev, tpg, port, lun); 620 return 0; 621 } 622 623 void core_dev_unexport( 624 struct se_device *dev, 625 struct se_portal_group *tpg, 626 struct se_lun *lun) 627 { 628 struct se_port *port = lun->lun_sep; 629 630 spin_lock(&lun->lun_sep_lock); 631 if (lun->lun_se_dev == NULL) { 632 spin_unlock(&lun->lun_sep_lock); 633 return; 634 } 635 spin_unlock(&lun->lun_sep_lock); 636 637 spin_lock(&dev->se_port_lock); 638 atomic_dec(&dev->dev_export_obj.obj_access_count); 639 core_release_port(dev, port); 640 spin_unlock(&dev->se_port_lock); 641 642 se_dev_stop(dev); 643 lun->lun_se_dev = NULL; 644 } 645 646 int target_report_luns(struct se_task *se_task) 647 { 648 struct se_cmd *se_cmd = se_task->task_se_cmd; 649 struct se_dev_entry *deve; 650 struct se_lun *se_lun; 651 struct se_session *se_sess = se_cmd->se_sess; 652 unsigned char *buf; 653 u32 lun_count = 0, offset = 8, i; 654 655 buf = transport_kmap_data_sg(se_cmd); 656 if (!buf) 657 return -ENOMEM; 658 659 /* 660 * If no struct se_session pointer is present, this struct se_cmd is 661 * coming via a target_core_mod PASSTHROUGH op, and not through 662 * a $FABRIC_MOD. In that case, report LUN=0 only. 663 */ 664 if (!se_sess) { 665 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); 666 lun_count = 1; 667 goto done; 668 } 669 670 spin_lock_irq(&se_sess->se_node_acl->device_list_lock); 671 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 672 deve = se_sess->se_node_acl->device_list[i]; 673 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 674 continue; 675 se_lun = deve->se_lun; 676 /* 677 * We determine the correct LUN LIST LENGTH even once we 678 * have reached the initial allocation length. 679 * See SPC2-R20 7.19. 680 */ 681 lun_count++; 682 if ((offset + 8) > se_cmd->data_length) 683 continue; 684 685 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); 686 offset += 8; 687 } 688 spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); 689 690 /* 691 * See SPC3 r07, page 159. 692 */ 693 done: 694 lun_count *= 8; 695 buf[0] = ((lun_count >> 24) & 0xff); 696 buf[1] = ((lun_count >> 16) & 0xff); 697 buf[2] = ((lun_count >> 8) & 0xff); 698 buf[3] = (lun_count & 0xff); 699 transport_kunmap_data_sg(se_cmd); 700 701 se_task->task_scsi_status = GOOD; 702 transport_complete_task(se_task, 1); 703 return 0; 704 } 705 706 /* se_release_device_for_hba(): 707 * 708 * 709 */ 710 void se_release_device_for_hba(struct se_device *dev) 711 { 712 struct se_hba *hba = dev->se_hba; 713 714 if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || 715 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || 716 (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || 717 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || 718 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) 719 se_dev_stop(dev); 720 721 if (dev->dev_ptr) { 722 kthread_stop(dev->process_thread); 723 if (dev->transport->free_device) 724 dev->transport->free_device(dev->dev_ptr); 725 } 726 727 spin_lock(&hba->device_lock); 728 list_del(&dev->dev_list); 729 hba->dev_count--; 730 spin_unlock(&hba->device_lock); 731 732 core_scsi3_free_all_registrations(dev); 733 se_release_vpd_for_dev(dev); 734 735 kfree(dev); 736 } 737 738 void se_release_vpd_for_dev(struct se_device *dev) 739 { 740 struct t10_vpd *vpd, *vpd_tmp; 741 742 spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 743 list_for_each_entry_safe(vpd, vpd_tmp, 744 &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { 745 list_del(&vpd->vpd_list); 746 kfree(vpd); 747 } 748 spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 749 } 750 751 /* se_free_virtual_device(): 752 * 753 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. 754 */ 755 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) 756 { 757 if (!list_empty(&dev->dev_sep_list)) 758 dump_stack(); 759 760 core_alua_free_lu_gp_mem(dev); 761 se_release_device_for_hba(dev); 762 763 return 0; 764 } 765 766 static void se_dev_start(struct se_device *dev) 767 { 768 struct se_hba *hba = dev->se_hba; 769 770 spin_lock(&hba->device_lock); 771 atomic_inc(&dev->dev_obj.obj_access_count); 772 if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { 773 if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { 774 dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; 775 dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; 776 } else if (dev->dev_status & 777 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { 778 dev->dev_status &= 779 ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; 780 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; 781 } 782 } 783 spin_unlock(&hba->device_lock); 784 } 785 786 static void se_dev_stop(struct se_device *dev) 787 { 788 struct se_hba *hba = dev->se_hba; 789 790 spin_lock(&hba->device_lock); 791 atomic_dec(&dev->dev_obj.obj_access_count); 792 if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { 793 if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { 794 dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; 795 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 796 } else if (dev->dev_status & 797 TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { 798 dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; 799 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; 800 } 801 } 802 spin_unlock(&hba->device_lock); 803 } 804 805 int se_dev_check_online(struct se_device *dev) 806 { 807 unsigned long flags; 808 int ret; 809 810 spin_lock_irqsave(&dev->dev_status_lock, flags); 811 ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || 812 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; 813 spin_unlock_irqrestore(&dev->dev_status_lock, flags); 814 815 return ret; 816 } 817 818 int se_dev_check_shutdown(struct se_device *dev) 819 { 820 int ret; 821 822 spin_lock_irq(&dev->dev_status_lock); 823 ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); 824 spin_unlock_irq(&dev->dev_status_lock); 825 826 return ret; 827 } 828 829 u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 830 { 831 u32 tmp, aligned_max_sectors; 832 /* 833 * Limit max_sectors to a PAGE_SIZE aligned value for modern 834 * transport_allocate_data_tasks() operation. 835 */ 836 tmp = rounddown((max_sectors * block_size), PAGE_SIZE); 837 aligned_max_sectors = (tmp / block_size); 838 if (max_sectors != aligned_max_sectors) { 839 printk(KERN_INFO "Rounding down aligned max_sectors from %u" 840 " to %u\n", max_sectors, aligned_max_sectors); 841 return aligned_max_sectors; 842 } 843 844 return max_sectors; 845 } 846 847 void se_dev_set_default_attribs( 848 struct se_device *dev, 849 struct se_dev_limits *dev_limits) 850 { 851 struct queue_limits *limits = &dev_limits->limits; 852 853 dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; 854 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; 855 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; 856 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 857 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; 858 dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; 859 dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; 860 dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 861 dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; 862 dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; 863 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 864 dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; 865 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 866 /* 867 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK 868 * iblock_create_virtdevice() from struct queue_limits values 869 * if blk_queue_discard()==1 870 */ 871 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 872 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 873 DA_MAX_UNMAP_BLOCK_DESC_COUNT; 874 dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 875 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 876 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 877 /* 878 * block_size is based on subsystem plugin dependent requirements. 879 */ 880 dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; 881 dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; 882 /* 883 * max_sectors is based on subsystem plugin dependent requirements. 884 */ 885 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 886 /* 887 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 888 */ 889 limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, 890 limits->logical_block_size); 891 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 892 /* 893 * Set fabric_max_sectors, which is reported in block limits 894 * VPD page (B0h). 895 */ 896 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 897 /* 898 * Set optimal_sectors from fabric_max_sectors, which can be 899 * lowered via configfs. 900 */ 901 dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 902 /* 903 * queue_depth is based on subsystem plugin dependent requirements. 904 */ 905 dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; 906 dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; 907 } 908 909 int se_dev_set_max_unmap_lba_count( 910 struct se_device *dev, 911 u32 max_unmap_lba_count) 912 { 913 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 914 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", 915 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 916 return 0; 917 } 918 919 int se_dev_set_max_unmap_block_desc_count( 920 struct se_device *dev, 921 u32 max_unmap_block_desc_count) 922 { 923 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 924 max_unmap_block_desc_count; 925 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", 926 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 927 return 0; 928 } 929 930 int se_dev_set_unmap_granularity( 931 struct se_device *dev, 932 u32 unmap_granularity) 933 { 934 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 935 pr_debug("dev[%p]: Set unmap_granularity: %u\n", 936 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 937 return 0; 938 } 939 940 int se_dev_set_unmap_granularity_alignment( 941 struct se_device *dev, 942 u32 unmap_granularity_alignment) 943 { 944 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 945 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", 946 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 947 return 0; 948 } 949 950 int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 951 { 952 if (flag != 0 && flag != 1) { 953 pr_err("Illegal value %d\n", flag); 954 return -EINVAL; 955 } 956 957 if (flag) { 958 pr_err("dpo_emulated not supported\n"); 959 return -EINVAL; 960 } 961 962 return 0; 963 } 964 965 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 966 { 967 if (flag != 0 && flag != 1) { 968 pr_err("Illegal value %d\n", flag); 969 return -EINVAL; 970 } 971 972 if (flag && dev->transport->fua_write_emulated == 0) { 973 pr_err("fua_write_emulated not supported\n"); 974 return -EINVAL; 975 } 976 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 977 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 978 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 979 return 0; 980 } 981 982 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 983 { 984 if (flag != 0 && flag != 1) { 985 pr_err("Illegal value %d\n", flag); 986 return -EINVAL; 987 } 988 989 if (flag) { 990 pr_err("ua read emulated not supported\n"); 991 return -EINVAL; 992 } 993 994 return 0; 995 } 996 997 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 998 { 999 if (flag != 0 && flag != 1) { 1000 pr_err("Illegal value %d\n", flag); 1001 return -EINVAL; 1002 } 1003 if (flag && dev->transport->write_cache_emulated == 0) { 1004 pr_err("write_cache_emulated not supported\n"); 1005 return -EINVAL; 1006 } 1007 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1008 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 1009 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 1010 return 0; 1011 } 1012 1013 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 1014 { 1015 if ((flag != 0) && (flag != 1) && (flag != 2)) { 1016 pr_err("Illegal value %d\n", flag); 1017 return -EINVAL; 1018 } 1019 1020 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1021 pr_err("dev[%p]: Unable to change SE Device" 1022 " UA_INTRLCK_CTRL while dev_export_obj: %d count" 1023 " exists\n", dev, 1024 atomic_read(&dev->dev_export_obj.obj_access_count)); 1025 return -EINVAL; 1026 } 1027 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 1028 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 1029 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 1030 1031 return 0; 1032 } 1033 1034 int se_dev_set_emulate_tas(struct se_device *dev, int flag) 1035 { 1036 if ((flag != 0) && (flag != 1)) { 1037 pr_err("Illegal value %d\n", flag); 1038 return -EINVAL; 1039 } 1040 1041 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1042 pr_err("dev[%p]: Unable to change SE Device TAS while" 1043 " dev_export_obj: %d count exists\n", dev, 1044 atomic_read(&dev->dev_export_obj.obj_access_count)); 1045 return -EINVAL; 1046 } 1047 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 1048 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 1049 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 1050 1051 return 0; 1052 } 1053 1054 int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 1055 { 1056 if ((flag != 0) && (flag != 1)) { 1057 pr_err("Illegal value %d\n", flag); 1058 return -EINVAL; 1059 } 1060 /* 1061 * We expect this value to be non-zero when generic Block Layer 1062 * Discard supported is detected iblock_create_virtdevice(). 1063 */ 1064 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1065 pr_err("Generic Block Discard not supported\n"); 1066 return -ENOSYS; 1067 } 1068 1069 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 1070 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 1071 dev, flag); 1072 return 0; 1073 } 1074 1075 int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 1076 { 1077 if ((flag != 0) && (flag != 1)) { 1078 pr_err("Illegal value %d\n", flag); 1079 return -EINVAL; 1080 } 1081 /* 1082 * We expect this value to be non-zero when generic Block Layer 1083 * Discard supported is detected iblock_create_virtdevice(). 1084 */ 1085 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1086 pr_err("Generic Block Discard not supported\n"); 1087 return -ENOSYS; 1088 } 1089 1090 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 1091 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 1092 dev, flag); 1093 return 0; 1094 } 1095 1096 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1097 { 1098 if ((flag != 0) && (flag != 1)) { 1099 pr_err("Illegal value %d\n", flag); 1100 return -EINVAL; 1101 } 1102 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 1103 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1104 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1105 return 0; 1106 } 1107 1108 int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1109 { 1110 if ((flag != 0) && (flag != 1)) { 1111 printk(KERN_ERR "Illegal value %d\n", flag); 1112 return -EINVAL; 1113 } 1114 dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; 1115 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", 1116 dev, flag); 1117 return 0; 1118 } 1119 1120 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) 1121 { 1122 if (flag != 0) { 1123 printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted" 1124 " reordering not implemented\n", dev); 1125 return -ENOSYS; 1126 } 1127 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; 1128 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 1129 return 0; 1130 } 1131 1132 /* 1133 * Note, this can only be called on unexported SE Device Object. 1134 */ 1135 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 1136 { 1137 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1138 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1139 " dev_export_obj: %d count exists\n", dev, 1140 atomic_read(&dev->dev_export_obj.obj_access_count)); 1141 return -EINVAL; 1142 } 1143 if (!queue_depth) { 1144 pr_err("dev[%p]: Illegal ZERO value for queue" 1145 "_depth\n", dev); 1146 return -EINVAL; 1147 } 1148 1149 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1150 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1151 pr_err("dev[%p]: Passed queue_depth: %u" 1152 " exceeds TCM/SE_Device TCQ: %u\n", 1153 dev, queue_depth, 1154 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1155 return -EINVAL; 1156 } 1157 } else { 1158 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 1159 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1160 pr_err("dev[%p]: Passed queue_depth:" 1161 " %u exceeds TCM/SE_Device MAX" 1162 " TCQ: %u\n", dev, queue_depth, 1163 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1164 return -EINVAL; 1165 } 1166 } 1167 } 1168 1169 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1170 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1171 dev, queue_depth); 1172 return 0; 1173 } 1174 1175 int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) 1176 { 1177 int force = 0; /* Force setting for VDEVS */ 1178 1179 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1180 pr_err("dev[%p]: Unable to change SE Device" 1181 " max_sectors while dev_export_obj: %d count exists\n", 1182 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1183 return -EINVAL; 1184 } 1185 if (!max_sectors) { 1186 pr_err("dev[%p]: Illegal ZERO value for" 1187 " max_sectors\n", dev); 1188 return -EINVAL; 1189 } 1190 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1191 pr_err("dev[%p]: Passed max_sectors: %u less than" 1192 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, 1193 DA_STATUS_MAX_SECTORS_MIN); 1194 return -EINVAL; 1195 } 1196 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1197 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1198 pr_err("dev[%p]: Passed max_sectors: %u" 1199 " greater than TCM/SE_Device max_sectors:" 1200 " %u\n", dev, max_sectors, 1201 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1202 return -EINVAL; 1203 } 1204 } else { 1205 if (!force && (max_sectors > 1206 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { 1207 pr_err("dev[%p]: Passed max_sectors: %u" 1208 " greater than TCM/SE_Device max_sectors" 1209 ": %u, use force=1 to override.\n", dev, 1210 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1211 return -EINVAL; 1212 } 1213 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1214 pr_err("dev[%p]: Passed max_sectors: %u" 1215 " greater than DA_STATUS_MAX_SECTORS_MAX:" 1216 " %u\n", dev, max_sectors, 1217 DA_STATUS_MAX_SECTORS_MAX); 1218 return -EINVAL; 1219 } 1220 } 1221 /* 1222 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1223 */ 1224 max_sectors = se_dev_align_max_sectors(max_sectors, 1225 dev->se_sub_dev->se_dev_attrib.block_size); 1226 1227 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1228 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1229 dev, max_sectors); 1230 return 0; 1231 } 1232 1233 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1234 { 1235 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1236 pr_err("dev[%p]: Unable to change SE Device" 1237 " fabric_max_sectors while dev_export_obj: %d count exists\n", 1238 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1239 return -EINVAL; 1240 } 1241 if (!fabric_max_sectors) { 1242 pr_err("dev[%p]: Illegal ZERO value for" 1243 " fabric_max_sectors\n", dev); 1244 return -EINVAL; 1245 } 1246 if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1247 pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" 1248 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, 1249 DA_STATUS_MAX_SECTORS_MIN); 1250 return -EINVAL; 1251 } 1252 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1253 if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1254 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1255 " greater than TCM/SE_Device max_sectors:" 1256 " %u\n", dev, fabric_max_sectors, 1257 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1258 return -EINVAL; 1259 } 1260 } else { 1261 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1262 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1263 " greater than DA_STATUS_MAX_SECTORS_MAX:" 1264 " %u\n", dev, fabric_max_sectors, 1265 DA_STATUS_MAX_SECTORS_MAX); 1266 return -EINVAL; 1267 } 1268 } 1269 /* 1270 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1271 */ 1272 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 1273 dev->se_sub_dev->se_dev_attrib.block_size); 1274 1275 dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors; 1276 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1277 dev, fabric_max_sectors); 1278 return 0; 1279 } 1280 1281 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1282 { 1283 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1284 pr_err("dev[%p]: Unable to change SE Device" 1285 " optimal_sectors while dev_export_obj: %d count exists\n", 1286 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1287 return -EINVAL; 1288 } 1289 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1290 pr_err("dev[%p]: Passed optimal_sectors cannot be" 1291 " changed for TCM/pSCSI\n", dev); 1292 return -EINVAL; 1293 } 1294 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { 1295 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1296 " greater than fabric_max_sectors: %u\n", dev, 1297 optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors); 1298 return -EINVAL; 1299 } 1300 1301 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1302 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1303 dev, optimal_sectors); 1304 return 0; 1305 } 1306 1307 int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1308 { 1309 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1310 pr_err("dev[%p]: Unable to change SE Device block_size" 1311 " while dev_export_obj: %d count exists\n", dev, 1312 atomic_read(&dev->dev_export_obj.obj_access_count)); 1313 return -EINVAL; 1314 } 1315 1316 if ((block_size != 512) && 1317 (block_size != 1024) && 1318 (block_size != 2048) && 1319 (block_size != 4096)) { 1320 pr_err("dev[%p]: Illegal value for block_device: %u" 1321 " for SE device, must be 512, 1024, 2048 or 4096\n", 1322 dev, block_size); 1323 return -EINVAL; 1324 } 1325 1326 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1327 pr_err("dev[%p]: Not allowed to change block_size for" 1328 " Physical Device, use for Linux/SCSI to change" 1329 " block_size for underlying hardware\n", dev); 1330 return -EINVAL; 1331 } 1332 1333 dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1334 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1335 dev, block_size); 1336 return 0; 1337 } 1338 1339 struct se_lun *core_dev_add_lun( 1340 struct se_portal_group *tpg, 1341 struct se_hba *hba, 1342 struct se_device *dev, 1343 u32 lun) 1344 { 1345 struct se_lun *lun_p; 1346 u32 lun_access = 0; 1347 int rc; 1348 1349 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1350 pr_err("Unable to export struct se_device while dev_access_obj: %d\n", 1351 atomic_read(&dev->dev_access_obj.obj_access_count)); 1352 return ERR_PTR(-EACCES); 1353 } 1354 1355 lun_p = core_tpg_pre_addlun(tpg, lun); 1356 if (IS_ERR(lun_p)) 1357 return lun_p; 1358 1359 if (dev->dev_flags & DF_READ_ONLY) 1360 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1361 else 1362 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 1363 1364 rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev); 1365 if (rc < 0) 1366 return ERR_PTR(rc); 1367 1368 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1369 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1370 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1371 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); 1372 /* 1373 * Update LUN maps for dynamically added initiators when 1374 * generate_node_acl is enabled. 1375 */ 1376 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1377 struct se_node_acl *acl; 1378 spin_lock_irq(&tpg->acl_node_lock); 1379 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1380 if (acl->dynamic_node_acl && 1381 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || 1382 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { 1383 spin_unlock_irq(&tpg->acl_node_lock); 1384 core_tpg_add_node_to_devs(acl, tpg); 1385 spin_lock_irq(&tpg->acl_node_lock); 1386 } 1387 } 1388 spin_unlock_irq(&tpg->acl_node_lock); 1389 } 1390 1391 return lun_p; 1392 } 1393 1394 /* core_dev_del_lun(): 1395 * 1396 * 1397 */ 1398 int core_dev_del_lun( 1399 struct se_portal_group *tpg, 1400 u32 unpacked_lun) 1401 { 1402 struct se_lun *lun; 1403 1404 lun = core_tpg_pre_dellun(tpg, unpacked_lun); 1405 if (IS_ERR(lun)) 1406 return PTR_ERR(lun); 1407 1408 core_tpg_post_dellun(tpg, lun); 1409 1410 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" 1411 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1412 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1413 tpg->se_tpg_tfo->get_fabric_name()); 1414 1415 return 0; 1416 } 1417 1418 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) 1419 { 1420 struct se_lun *lun; 1421 1422 spin_lock(&tpg->tpg_lun_lock); 1423 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1424 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1425 "_PER_TPG-1: %u for Target Portal Group: %hu\n", 1426 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1427 TRANSPORT_MAX_LUNS_PER_TPG-1, 1428 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1429 spin_unlock(&tpg->tpg_lun_lock); 1430 return NULL; 1431 } 1432 lun = tpg->tpg_lun_list[unpacked_lun]; 1433 1434 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { 1435 pr_err("%s Logical Unit Number: %u is not free on" 1436 " Target Portal Group: %hu, ignoring request.\n", 1437 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1438 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1439 spin_unlock(&tpg->tpg_lun_lock); 1440 return NULL; 1441 } 1442 spin_unlock(&tpg->tpg_lun_lock); 1443 1444 return lun; 1445 } 1446 1447 /* core_dev_get_lun(): 1448 * 1449 * 1450 */ 1451 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) 1452 { 1453 struct se_lun *lun; 1454 1455 spin_lock(&tpg->tpg_lun_lock); 1456 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1457 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1458 "_TPG-1: %u for Target Portal Group: %hu\n", 1459 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1460 TRANSPORT_MAX_LUNS_PER_TPG-1, 1461 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1462 spin_unlock(&tpg->tpg_lun_lock); 1463 return NULL; 1464 } 1465 lun = tpg->tpg_lun_list[unpacked_lun]; 1466 1467 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 1468 pr_err("%s Logical Unit Number: %u is not active on" 1469 " Target Portal Group: %hu, ignoring request.\n", 1470 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1471 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1472 spin_unlock(&tpg->tpg_lun_lock); 1473 return NULL; 1474 } 1475 spin_unlock(&tpg->tpg_lun_lock); 1476 1477 return lun; 1478 } 1479 1480 struct se_lun_acl *core_dev_init_initiator_node_lun_acl( 1481 struct se_portal_group *tpg, 1482 u32 mapped_lun, 1483 char *initiatorname, 1484 int *ret) 1485 { 1486 struct se_lun_acl *lacl; 1487 struct se_node_acl *nacl; 1488 1489 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { 1490 pr_err("%s InitiatorName exceeds maximum size.\n", 1491 tpg->se_tpg_tfo->get_fabric_name()); 1492 *ret = -EOVERFLOW; 1493 return NULL; 1494 } 1495 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 1496 if (!nacl) { 1497 *ret = -EINVAL; 1498 return NULL; 1499 } 1500 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 1501 if (!lacl) { 1502 pr_err("Unable to allocate memory for struct se_lun_acl.\n"); 1503 *ret = -ENOMEM; 1504 return NULL; 1505 } 1506 1507 INIT_LIST_HEAD(&lacl->lacl_list); 1508 lacl->mapped_lun = mapped_lun; 1509 lacl->se_lun_nacl = nacl; 1510 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 1511 1512 return lacl; 1513 } 1514 1515 int core_dev_add_initiator_node_lun_acl( 1516 struct se_portal_group *tpg, 1517 struct se_lun_acl *lacl, 1518 u32 unpacked_lun, 1519 u32 lun_access) 1520 { 1521 struct se_lun *lun; 1522 struct se_node_acl *nacl; 1523 1524 lun = core_dev_get_lun(tpg, unpacked_lun); 1525 if (!lun) { 1526 pr_err("%s Logical Unit Number: %u is not active on" 1527 " Target Portal Group: %hu, ignoring request.\n", 1528 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1529 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1530 return -EINVAL; 1531 } 1532 1533 nacl = lacl->se_lun_nacl; 1534 if (!nacl) 1535 return -EINVAL; 1536 1537 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && 1538 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) 1539 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1540 1541 lacl->se_lun = lun; 1542 1543 if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, 1544 lun_access, nacl, tpg, 1) < 0) 1545 return -EINVAL; 1546 1547 spin_lock(&lun->lun_acl_lock); 1548 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); 1549 atomic_inc(&lun->lun_acl_count); 1550 smp_mb__after_atomic_inc(); 1551 spin_unlock(&lun->lun_acl_lock); 1552 1553 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1554 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 1555 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, 1556 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 1557 lacl->initiatorname); 1558 /* 1559 * Check to see if there are any existing persistent reservation APTPL 1560 * pre-registrations that need to be enabled for this LUN ACL.. 1561 */ 1562 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); 1563 return 0; 1564 } 1565 1566 /* core_dev_del_initiator_node_lun_acl(): 1567 * 1568 * 1569 */ 1570 int core_dev_del_initiator_node_lun_acl( 1571 struct se_portal_group *tpg, 1572 struct se_lun *lun, 1573 struct se_lun_acl *lacl) 1574 { 1575 struct se_node_acl *nacl; 1576 1577 nacl = lacl->se_lun_nacl; 1578 if (!nacl) 1579 return -EINVAL; 1580 1581 spin_lock(&lun->lun_acl_lock); 1582 list_del(&lacl->lacl_list); 1583 atomic_dec(&lun->lun_acl_count); 1584 smp_mb__after_atomic_dec(); 1585 spin_unlock(&lun->lun_acl_lock); 1586 1587 core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, 1588 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 1589 1590 lacl->se_lun = NULL; 1591 1592 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1593 " InitiatorNode: %s Mapped LUN: %u\n", 1594 tpg->se_tpg_tfo->get_fabric_name(), 1595 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1596 lacl->initiatorname, lacl->mapped_lun); 1597 1598 return 0; 1599 } 1600 1601 void core_dev_free_initiator_node_lun_acl( 1602 struct se_portal_group *tpg, 1603 struct se_lun_acl *lacl) 1604 { 1605 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1606 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1607 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1608 tpg->se_tpg_tfo->get_fabric_name(), 1609 lacl->initiatorname, lacl->mapped_lun); 1610 1611 kfree(lacl); 1612 } 1613 1614 int core_dev_setup_virtual_lun0(void) 1615 { 1616 struct se_hba *hba; 1617 struct se_device *dev; 1618 struct se_subsystem_dev *se_dev = NULL; 1619 struct se_subsystem_api *t; 1620 char buf[16]; 1621 int ret; 1622 1623 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); 1624 if (IS_ERR(hba)) 1625 return PTR_ERR(hba); 1626 1627 lun0_hba = hba; 1628 t = hba->transport; 1629 1630 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 1631 if (!se_dev) { 1632 pr_err("Unable to allocate memory for" 1633 " struct se_subsystem_dev\n"); 1634 ret = -ENOMEM; 1635 goto out; 1636 } 1637 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 1638 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 1639 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 1640 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); 1641 spin_lock_init(&se_dev->t10_pr.registration_lock); 1642 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); 1643 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); 1644 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); 1645 spin_lock_init(&se_dev->se_dev_lock); 1646 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; 1647 se_dev->t10_wwn.t10_sub_dev = se_dev; 1648 se_dev->t10_alua.t10_sub_dev = se_dev; 1649 se_dev->se_dev_attrib.da_sub_dev = se_dev; 1650 se_dev->se_dev_hba = hba; 1651 1652 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); 1653 if (!se_dev->se_dev_su_ptr) { 1654 pr_err("Unable to locate subsystem dependent pointer" 1655 " from allocate_virtdevice()\n"); 1656 ret = -ENOMEM; 1657 goto out; 1658 } 1659 lun0_su_dev = se_dev; 1660 1661 memset(buf, 0, 16); 1662 sprintf(buf, "rd_pages=8"); 1663 t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); 1664 1665 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1666 if (IS_ERR(dev)) { 1667 ret = PTR_ERR(dev); 1668 goto out; 1669 } 1670 se_dev->se_dev_ptr = dev; 1671 g_lun0_dev = dev; 1672 1673 return 0; 1674 out: 1675 lun0_su_dev = NULL; 1676 kfree(se_dev); 1677 if (lun0_hba) { 1678 core_delete_hba(lun0_hba); 1679 lun0_hba = NULL; 1680 } 1681 return ret; 1682 } 1683 1684 1685 void core_dev_release_virtual_lun0(void) 1686 { 1687 struct se_hba *hba = lun0_hba; 1688 struct se_subsystem_dev *su_dev = lun0_su_dev; 1689 1690 if (!hba) 1691 return; 1692 1693 if (g_lun0_dev) 1694 se_free_virtual_device(g_lun0_dev, hba); 1695 1696 kfree(su_dev); 1697 core_delete_hba(hba); 1698 } 1699