/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_device *dev;
	struct se_dev_entry *deve;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this
		 * Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	if (cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY)
		return true;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
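 *
 * Callers are expected to balance the kref_get() taken here with
 * kref_put(&deve->pr_kref, target_pr_kref_release), which completes
 * &deve->pr_comp for anyone waiting in core_disable_device_list_for_node().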
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
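 * Returns the matching struct se_dev_entry for @mapped_lun, or NULL when
 * this NodeACL has no entry mapping that LUN.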
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u32 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*	core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->alua_port_list);
	INIT_LIST_HEAD(&new->ua_list);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
			       " %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock_bh(&port->sep_alua_lock);
		list_del(&orig->alua_port_list);
		list_add_tail(&new->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&new->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&orig->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
}

/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
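		 * e.g. once dev_rpti_counter wraps past 0xffff, a freshly
		 * assigned sep_rtpi may collide with an identifier still in
		 * use on dev_sep_list, in which case we retry with the next
		 * counter value.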
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_index = dev->dev_index;
	lun->lun_se_dev = dev;
	lun->lun_rtpi = port->sep_rtpi;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

bool se_dev_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	printk_once(KERN_WARNING
		"ignoring deprecated emulate_dpo attribute\n");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_warn("emulate_fua_write not supported for this device, ignoring\n");
		return 0;
	}
	if (dev->export_count) {
		pr_err("emulate_fua_write cannot be changed with active"
		       " exports: %d\n", dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	printk_once(KERN_WARNING
		"ignoring deprecated emulate_fua_read attribute\n");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("emulate_write_cache cannot be changed with active"
		       " exports: %d\n", dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ?
"Enabled" : "Disabled"); 862 863 return 0; 864 } 865 EXPORT_SYMBOL(se_dev_set_emulate_tas); 866 867 int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 868 { 869 if ((flag != 0) && (flag != 1)) { 870 pr_err("Illegal value %d\n", flag); 871 return -EINVAL; 872 } 873 /* 874 * We expect this value to be non-zero when generic Block Layer 875 * Discard supported is detected iblock_create_virtdevice(). 876 */ 877 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { 878 pr_err("Generic Block Discard not supported\n"); 879 return -ENOSYS; 880 } 881 882 dev->dev_attrib.emulate_tpu = flag; 883 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 884 dev, flag); 885 return 0; 886 } 887 EXPORT_SYMBOL(se_dev_set_emulate_tpu); 888 889 int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 890 { 891 if ((flag != 0) && (flag != 1)) { 892 pr_err("Illegal value %d\n", flag); 893 return -EINVAL; 894 } 895 /* 896 * We expect this value to be non-zero when generic Block Layer 897 * Discard supported is detected iblock_create_virtdevice(). 898 */ 899 if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { 900 pr_err("Generic Block Discard not supported\n"); 901 return -ENOSYS; 902 } 903 904 dev->dev_attrib.emulate_tpws = flag; 905 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 906 dev, flag); 907 return 0; 908 } 909 EXPORT_SYMBOL(se_dev_set_emulate_tpws); 910 911 int se_dev_set_emulate_caw(struct se_device *dev, int flag) 912 { 913 if (flag != 0 && flag != 1) { 914 pr_err("Illegal value %d\n", flag); 915 return -EINVAL; 916 } 917 dev->dev_attrib.emulate_caw = flag; 918 pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n", 919 dev, flag); 920 921 return 0; 922 } 923 EXPORT_SYMBOL(se_dev_set_emulate_caw); 924 925 int se_dev_set_emulate_3pc(struct se_device *dev, int flag) 926 { 927 if (flag != 0 && flag != 1) { 928 pr_err("Illegal value %d\n", flag); 929 return -EINVAL; 930 } 931 dev->dev_attrib.emulate_3pc = flag; 932 pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n", 933 dev, flag); 934 935 return 0; 936 } 937 EXPORT_SYMBOL(se_dev_set_emulate_3pc); 938 939 int se_dev_set_pi_prot_type(struct se_device *dev, int flag) 940 { 941 int rc, old_prot = dev->dev_attrib.pi_prot_type; 942 943 if (flag != 0 && flag != 1 && flag != 2 && flag != 3) { 944 pr_err("Illegal value %d for pi_prot_type\n", flag); 945 return -EINVAL; 946 } 947 if (flag == 2) { 948 pr_err("DIF TYPE2 protection currently not supported\n"); 949 return -ENOSYS; 950 } 951 if (dev->dev_attrib.hw_pi_prot_type) { 952 pr_warn("DIF protection enabled on underlying hardware," 953 " ignoring\n"); 954 return 0; 955 } 956 if (!dev->transport->init_prot || !dev->transport->free_prot) { 957 /* 0 is only allowed value for non-supporting backends */ 958 if (flag == 0) 959 return 0; 960 961 pr_err("DIF protection not supported by backend: %s\n", 962 dev->transport->name); 963 return -ENOSYS; 964 } 965 if (!(dev->dev_flags & DF_CONFIGURED)) { 966 pr_err("DIF protection requires device to be configured\n"); 967 return -ENODEV; 968 } 969 if (dev->export_count) { 970 pr_err("dev[%p]: Unable to change SE Device PROT type while" 971 " export_count is %d\n", dev, dev->export_count); 972 return -EINVAL; 973 } 974 975 dev->dev_attrib.pi_prot_type = flag; 976 977 if (flag && !old_prot) { 978 rc = dev->transport->init_prot(dev); 979 if (rc) { 980 dev->dev_attrib.pi_prot_type = old_prot; 981 return rc; 982 } 983 984 } else if (!flag && old_prot) { 985 dev->transport->free_prot(dev); 986 } 987 
pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag); 988 989 return 0; 990 } 991 EXPORT_SYMBOL(se_dev_set_pi_prot_type); 992 993 int se_dev_set_pi_prot_format(struct se_device *dev, int flag) 994 { 995 int rc; 996 997 if (!flag) 998 return 0; 999 1000 if (flag != 1) { 1001 pr_err("Illegal value %d for pi_prot_format\n", flag); 1002 return -EINVAL; 1003 } 1004 if (!dev->transport->format_prot) { 1005 pr_err("DIF protection format not supported by backend %s\n", 1006 dev->transport->name); 1007 return -ENOSYS; 1008 } 1009 if (!(dev->dev_flags & DF_CONFIGURED)) { 1010 pr_err("DIF protection format requires device to be configured\n"); 1011 return -ENODEV; 1012 } 1013 if (dev->export_count) { 1014 pr_err("dev[%p]: Unable to format SE Device PROT type while" 1015 " export_count is %d\n", dev, dev->export_count); 1016 return -EINVAL; 1017 } 1018 1019 rc = dev->transport->format_prot(dev); 1020 if (rc) 1021 return rc; 1022 1023 pr_debug("dev[%p]: SE Device Protection Format complete\n", dev); 1024 1025 return 0; 1026 } 1027 EXPORT_SYMBOL(se_dev_set_pi_prot_format); 1028 1029 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1030 { 1031 if ((flag != 0) && (flag != 1)) { 1032 pr_err("Illegal value %d\n", flag); 1033 return -EINVAL; 1034 } 1035 dev->dev_attrib.enforce_pr_isids = flag; 1036 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1037 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1038 return 0; 1039 } 1040 EXPORT_SYMBOL(se_dev_set_enforce_pr_isids); 1041 1042 int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) 1043 { 1044 if ((flag != 0) && (flag != 1)) { 1045 printk(KERN_ERR "Illegal value %d\n", flag); 1046 return -EINVAL; 1047 } 1048 if (dev->export_count) { 1049 pr_err("dev[%p]: Unable to set force_pr_aptpl while" 1050 " export_count is %d\n", dev, dev->export_count); 1051 return -EINVAL; 1052 } 1053 1054 dev->dev_attrib.force_pr_aptpl = flag; 1055 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); 1056 return 0; 1057 } 1058 EXPORT_SYMBOL(se_dev_set_force_pr_aptpl); 1059 1060 int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1061 { 1062 if ((flag != 0) && (flag != 1)) { 1063 printk(KERN_ERR "Illegal value %d\n", flag); 1064 return -EINVAL; 1065 } 1066 dev->dev_attrib.is_nonrot = flag; 1067 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", 1068 dev, flag); 1069 return 0; 1070 } 1071 EXPORT_SYMBOL(se_dev_set_is_nonrot); 1072 1073 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) 1074 { 1075 if (flag != 0) { 1076 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted" 1077 " reordering not implemented\n", dev); 1078 return -ENOSYS; 1079 } 1080 dev->dev_attrib.emulate_rest_reord = flag; 1081 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 1082 return 0; 1083 } 1084 EXPORT_SYMBOL(se_dev_set_emulate_rest_reord); 1085 1086 /* 1087 * Note, this can only be called on unexported SE Device Object. 
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (queue_depth > dev->dev_attrib.queue_depth) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.hw_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
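	 * Dynamic NodeACLs are skipped when the fabric implements
	 * tpg_check_demo_mode_login_only and it returns true for this TPG.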
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
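	 * core_scsi3_check_aptpl_registration() below matches them by tpg,
	 * lun and this NodeACL's mapped_lun.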
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point to have two different values here..
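	 * block_size and queue_depth start out mirroring the hw_* values
	 * reported by the backend, but may later be overridden through
	 * se_dev_set_block_size() and se_dev_set_queue_depth().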
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
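
/*
 * Illustrative pairing (hypothetical call site, not taken from this file):
 * the setup and release helpers are intended to be used once at subsystem
 * init and exit time, e.g.:
 *
 *	if (core_dev_setup_virtual_lun0() < 0)
 *		goto out_fail;
 *	...
 *	core_dev_release_virtual_lun0();
 */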

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);