// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

/**
 * transport_lookup_cmd_lun - map se_cmd->orig_fe_lun to a configured LUN
 * @se_cmd: command to map
 *
 * Looks up the LUN via the session's NodeACL, accounts per-deve and
 * per-device statistics, and takes a percpu se_lun->lun_ref that is
 * dropped when the command completes. Falls back to the TPG's virtual
 * LUN 0 so that REPORT LUNS works for initiators with no mappings.
 */
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
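		 * (SPC requires REPORT LUNS addressed to LUN 0 to succeed
		 * even when no explicit LUN 0 mapping exists.)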
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

/**
 * transport_lookup_tmr_lun - map a TMR command to its LUN and device
 * @se_cmd: TMR command
 *
 * Returns 0 on success or -ENODEV if no matching MappedLUN exists.
 * On success the TMR is added to the device's dev_tmr_list.
 */
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
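 * The caller is responsible for dropping the reference via
 * kref_put(&deve->pr_kref, target_pr_kref_release) once done.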
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
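			/*
			 * A dynamic (demo-mode) entry may only be converted
			 * to an explicit MappedLUN for the same se_lun.
			 */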
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
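	 * kref_put() drops the initial reference taken in kref_init(), and
	 * wait_for_completion() blocks until target_pr_kref_release()
	 * signals that the final reference has been dropped.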
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 * Disable the given struct se_lun from every NodeACL in @tpg that
 * currently maps it.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->export_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
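	 * e.g. with 512-byte blocks and 4 KiB pages, alignment is 8
	 * blocks, so a max_sectors of 1030 is rounded down to 1024.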
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 * Deactivate and remove @lun from @tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
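	 * (APTPL: Activate Persist Through Power Loss, see SPC-4
	 * PERSISTENT RESERVE OUT.)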
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		INIT_LIST_HEAD(&dev->queues[i].state_list);
		spin_lock_init(&dev->queues[i].lock);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
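	/* LBA map state used for ALUA referrals, see core_alua_set_lba_map() */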
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
		block_size;
	attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
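 * e.g. for a 4096-byte block_size device, LBA 16 becomes Linux
 * sector 128 (16 << 3).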
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
		       " object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
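	 * (block_size mirrors hw_block_size until a user overrides it
	 * via configfs.)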
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
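	 * (Only the target core knows the fabric view of mapped LUNs;
	 * the backing device cannot build REPORT LUNS data itself.)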
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);