/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_lun;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by the percpu se_lun->lun_ref taken above,
	 * which must drop to zero (including the initial reference) before
	 * this se_lun pointer can be kfree_rcu()'d by the final
	 * se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
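/*
 * Illustrative caller sketch (not from this file): fabric code reaches the
 * lookup above through the generic command submission path, and a caller is
 * expected to turn a non-zero sense_reason_t into a check condition,
 * roughly:
 *
 *	sense_reason_t rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	if (rc)		// e.g. TCM_NON_EXISTENT_LUN or TCM_WRITE_PROTECTED
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *
 * The exact call chain lives outside this file; treat this as a sketch only.
 */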
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);
		se_tmr->tmr_lun = se_lun;
		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	/*
	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = se_cmd->se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entry's device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
				lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
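/*
 * Illustrative usage note: target_nacl_find_deve() walks an RCU-protected
 * hlist, so lockless callers must bracket it in a read-side critical
 * section (writers hold nacl->lun_entry_mutex instead), e.g.:
 *
 *	rcu_read_lock();
 *	deve = target_nacl_find_deve(nacl, mapped_lun);
 *	if (deve)
 *		... only dereference deve inside this section ...
 *	rcu_read_unlock();
 */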
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
				lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}
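/*
 * Note on the update idiom above (a summary, not new behavior): the new
 * se_dev_entry is fully initialized *before* rcu_assign_pointer() and
 * hlist_add_head_rcu() publish it, so lockless readers such as
 * target_nacl_find_deve() observe either the old or the new entry, never a
 * half-built one. The displaced entry is handed to kfree_rcu() only after
 * the pr_kref put and pr_comp wait, so in-flight PR references drain first
 * and pre-existing RCU readers cannot touch freed memory.
 */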
/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now, before clearing the
	 * struct se_dev_entry pointers below, as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off the RCU callback, wait for any in-process
	 * SPEC_I_PT=1 or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count == 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device. Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
		 * across a 16-bit wrap.
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}
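/*
 * Worked example for the allocator above (illustrative numbers): with
 * dev_rpti_counter at 0xffff, the post-increment wraps the 16-bit counter
 * to 0; 0h is reserved by SPC-4, so the !lun->lun_rtpi test jumps back to
 * "again" and the next pass hands out 1h. If 1h is still claimed by an
 * entry on dev->dev_sep_list, the uniqueness scan retries until a free
 * identifier turns up; the dev->export_count < 0xffff check above
 * guarantees one exists.
 */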
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
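/*
 * Worked example (assumed values): with PAGE_SIZE = 4096 and a 512-byte
 * block_size, alignment = 4096 / 512 = 8 sectors, so a hardware limit of
 * max_sectors = 2047 rounds down to 2040; 4096-byte blocks give
 * alignment = 1 and pass through unchanged. The max(1ul, ...) guard keeps
 * the divisor sane if block_size ever exceeds PAGE_SIZE.
 */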
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->mapped_lun, lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation
	 * APTPL pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data = DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}
/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA, in which case we need to report TPE=1.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code.
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
		block_size;
	attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
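/*
 * Unit-conversion note (illustrative): max_discard_sectors is expressed in
 * 512-byte block-layer sectors, while max_unmap_lba_count is in logical
 * blocks, hence the shift by ilog2(block_size) - 9. E.g. for a 4096-byte
 * logical block the shift is ilog2(4096) - 9 = 12 - 9 = 3, so 8 discard
 * sectors collapse into 1 UNMAP LBA.
 */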
/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);
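/*
 * Worked example: for a device advertising 4096-byte blocks, logical block
 * 100 maps to 512-byte sector 100 << 3 = 800; with the default 512-byte
 * block_size the value passes through unchanged.
 */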
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	/*
	 * XXX: there is not much point in having two different values here.
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying
	 * device won't choke on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response; for
	 * everything else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
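/*
 * Illustrative backend usage (the my_pt_* names are hypothetical, not from
 * this file): a passthrough backend wires its parse_cdb hook to the helper
 * above and supplies its own execution callback, roughly:
 *
 *	static sense_reason_t my_pt_execute_cmd(struct se_cmd *cmd)
 *	{
 *		... hand the raw CDB to the underlying device ...
 *	}
 *
 *	static sense_reason_t my_pt_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, my_pt_execute_cmd);
 *	}
 *
 * Per the comment above, both the kernel (pSCSI) and user (TCMU)
 * passthrough backends share this parsing path.
 */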