// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
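/*
 * Illustrative sketch (not part of the original file): a caller of the
 * lookup above is expected to translate a sense_reason_t failure into a
 * check condition, along the lines of:
 *
 *	rc = transport_lookup_cmd_lun(se_cmd);
 *	if (rc)
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *
 * The exact error path is fabric-specific; this only shows the intended
 * contract of the sense_reason_t return value.
 */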
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
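/*
 * Usage note: target_lun_is_rdonly() above lets SCSI emulation code reflect
 * a read-only mapped LUN back to the initiator, e.g. via the
 * device-specific WP bit in MODE SENSE data (see spc_emulate_modesense()
 * in target_core_spc.c for one such caller).
 */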
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}
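/*
 * core_enable_device_list_for_node() below publishes a new se_dev_entry for
 * @mapped_lun under RCU.  If a dynamic (demo-mode) entry already exists for
 * the same LUN, it is replaced wholesale: the new entry is hlist-swapped in,
 * the old entry waits out any in-flight PR references via pr_comp, and is
 * then freed with kfree_rcu().  Readers on the I/O path never observe a
 * half-updated entry.
 */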
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}
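/*
 * Note on the pr_kref pattern used above: kref_put() drops only the initial
 * reference taken by kref_init(), and target_pr_kref_release() fires once
 * every transient holder (e.g. core_get_se_deve_from_rtpi()) has dropped its
 * reference as well.  The wait_for_completion(&orig->pr_comp) therefore
 * quiesces all in-flight PR users before kfree_rcu() is scheduled.
 */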
/* core_clear_lun_from_tpg():
 *
 * Walk every NodeACL in @tpg and disable any se_dev_entry still mapping @lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
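/*
 * Worked example for se_dev_align_max_sectors() above (assuming
 * PAGE_SIZE == 4096): with a 512-byte block_size, alignment = 4096 / 512 = 8,
 * so a hardware max_sectors of 1023 is rounded down to 1016; with a
 * 4096-byte block_size, alignment = 1 and max_sectors passes through
 * unchanged.
 */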
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 * Deactivate and remove @lun from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}
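/*
 * Context note: core_dev_add_initiator_node_lun_acl() above is driven from
 * the configfs layer; creating the MappedLUN symlink under
 * .../acls/<initiator>/lun_<N>/ is what ends up invoking it (see
 * target_fabric_mappedlun_link() in target_core_fabric_configfs.c).
 */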
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
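/*
 * target_alloc_device() below builds a new se_device from the backend's
 * alloc_device() hook, sets up one se_device_queue per possible CPU for
 * queued submission, seeds the T10 WWN/ALUA/PR state and attribute defaults,
 * and preloads the INQUIRY vendor/model/revision strings.  The device is not
 * visible to initiators until target_configure_device() succeeds.
 */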
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code.
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
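/*
 * Worked example for the shift above: bdev_max_discard_sectors() reports
 * 512-byte sectors, so for a 4096-byte logical block size the shift is
 * ilog2(4096) - 9 = 3, converting e.g. 65536 discard sectors into
 * 8192 UNMAP LBAs.
 */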
/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}
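/*
 * Hypothetical usage sketch for target_for_each_device() (the callback name
 * below is illustrative, not from this file):
 *
 *	static int show_dev_index(struct se_device *dev, void *data)
 *	{
 *		pr_info("configured se_device index: %u\n", dev->dev_index);
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	target_for_each_device(show_dev_index, NULL);
 */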
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here.
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
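/*
 * Note: passthrough_parse_cdb() below is the shared parse_cdb implementation
 * for passthrough backends; pscsi and tcmu, for example, call it with their
 * own execute callback rather than going through the sbc/spc emulation paths.
 */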
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
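/*
 * Hypothetical wiring sketch (names are illustrative, not from this file):
 * a passthrough backend typically routes its parse_cdb hook through
 * passthrough_parse_cdb(), supplying its own execution callback:
 *
 *	static sense_reason_t foo_execute_cmd(struct se_cmd *cmd)
 *	{
 *		// hand the raw CDB to the underlying device
 *		return TCM_NO_SENSE;
 *	}
 *
 *	static sense_reason_t foo_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, foo_execute_cmd);
 *	}
 *
 * This mirrors how pscsi and tcmu consume the helper.
 */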