/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_lun;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

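/*
 * Usage sketch (illustrative, not part of this file): fabric code resolves
 * the LUN before any backend processing starts, and turns a lookup failure
 * into a check condition, e.g.:
 *
 *	sense_reason_t rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	if (rc)
 *		return transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *
 * In-tree fabric drivers normally reach this indirectly through
 * target_submit_cmd_map_sgls() rather than calling it by hand.
 */
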
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);
		se_tmr->tmr_lun = se_lun;
		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = se_cmd->se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

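/*
 * Usage sketch (illustrative): SPC/SBC emulation code can consult the
 * per-ACL write-protect bit before accepting a media-modifying command,
 * along the lines of:
 *
 *	if (cmd->data_direction == DMA_TO_DEVICE && target_lun_is_rdonly(cmd))
 *		return TCM_WRITE_PROTECTED;
 *
 * The same helper is what lets MODE SENSE emulation report the WP bit for
 * read-only mapped LUNs.
 */
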
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entry's se_lun pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

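/*
 * Sketch of the pr_kref pairing expected from callers of
 * core_get_se_deve_from_rtpi() (illustrative):
 *
 *	deve = core_get_se_deve_from_rtpi(nacl, rtpi);
 *	if (deve) {
 *		... use *deve outside of rcu_read_lock ...
 *		kref_put(&deve->pr_kref, target_pr_kref_release);
 *	}
 *
 * The kref taken under rcu_read_lock keeps the entry alive after the
 * read-side critical section ends; target_pr_kref_release() completes
 * ->pr_comp so a concurrent core_disable_device_list_for_node() waiting
 * on it can make progress.
 */
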
/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	return 0;
}

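/*
 * The dynamic -> explicit conversion above follows the usual RCU replace
 * pattern; distilled (illustrative, error handling elided):
 *
 *	new = kzalloc(...);			// prepare off-list
 *	rcu_assign_pointer(new->se_lun, lun);	// publish pointers first
 *	hlist_del_rcu(&orig->link);		// unlink the old entry
 *	hlist_add_head_rcu(&new->link, head);	// readers now see "new"
 *	kref_put(&orig->pr_kref, target_pr_kref_release);
 *	wait_for_completion(&orig->pr_comp);	// drain PR users of "orig"
 *	kfree_rcu(orig, rcu_head);		// free after a grace period
 *
 * Lockless readers in transport_lookup_cmd_lun() therefore always observe
 * either the old or the new entry, never a half-initialized one.
 */
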
/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);
	orig->lun_flags = 0;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
}

void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

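/*
 * Worked example for se_dev_align_max_sectors(), assuming PAGE_SIZE = 4096:
 * with a 512-byte block size the alignment is 4096 / 512 = 8 sectors, so a
 * max_sectors of 1023 is rounded down to 1016 (127 full pages); with a
 * 4096-byte or larger block size PAGE_SIZE / block_size is 1 (or clamps to
 * 1 via the max()), and max_sectors is left unchanged.
 */
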
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
					     lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++) {
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	}
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++) {
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	}
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++) {
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	}
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

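/*
 * Illustrative configfs flow (userspace view, paths abbreviated): creating
 * a mapped LUN under an explicit NodeACL lands in the functions above,
 * e.g. for an iSCSI fabric:
 *
 *	mkdir .../tpgt_1/acls/$INITIATOR/lun_0
 *	ln -s .../tpgt_1/lun/lun_0 .../tpgt_1/acls/$INITIATOR/lun_0/<link>
 *
 * The symlink creation ends up in core_dev_add_initiator_node_lun_acl(),
 * and writing to the mapped LUN's write_protect attribute flips the flags
 * via core_update_device_list_access().
 */
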
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

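/*
 * Backend device lifecycle sketch (illustrative); compare
 * core_dev_setup_virtual_lun0() below, which follows the same sequence
 * for the internal rd_mcp LUN 0:
 *
 *	dev = target_alloc_device(hba, "some_dev");
 *	hba->backend->ops->set_configfs_dev_params(dev, params, strlen(params));
 *	ret = target_configure_device(dev);
 *	...
 *	target_free_device(dev);
 *
 * target_free_device() is safe to call on a device that never reached
 * DF_CONFIGURED, which is what the error unwinding below relies on.
 */
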
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_device already configured for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point in having two different values here.
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
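
/*
 * Usage sketch (illustrative): a passthrough backend wires its own execute
 * callback through this helper from its parse_cdb() method, e.g.:
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, foo_execute_cmd);
 *	}
 *
 * where foo_execute_cmd() stands in for the backend's per-command
 * submission routine; the in-tree pSCSI and TCMU backends follow this
 * pattern.
 */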