// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
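 * The caller must drop the obtained reference with
 * kref_put(&deve->pr_kref, target_pr_kref_release) once it is done with
 * the returned entry.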
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
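	/*
	 * pr_comp pairs with pr_kref: target_pr_kref_release() completes it
	 * once the final reference is dropped, which is what allows
	 * core_disable_device_list_for_node() and the dynamic -> explicit
	 * conversion path below to wait out in-flight PR users before
	 * freeing the entry.
	 */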
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		wwn->vendor);
	pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		wwn->model);
	pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		wwn->revision);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
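	/* ALUA target port group lists and LBA map state */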
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
	mutex_init(&dev->lun_reset_mutex);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	struct config_item *item;
	int ret;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);
	config_item_put(item);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	return ret;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);