/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			ret = TCM_WRITE_PROTECTED;
			goto ref_dev;
		}
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_lun = rcu_dereference(deve->se_lun);

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 * Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code           Description
	 * 0h             Reserved
	 * 1h             Relative port 1, historically known as port A
	 * 2h             Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_ref_comp);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	return dev;
}

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct request_queue *q)
{
	int block_size = queue_logical_block_size(q);

	if (!blk_queue_discard(q))
		return false;

	attrib->max_unmap_lba_count =
		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
	/*
	 * Currently hardcoded to 1 in Linux/SCSI code..
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
							block_size;
	attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
		       " object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		destroy_workqueue(dev->tmr_wq);

		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}


void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
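
/*
 * Illustrative sketch only, not part of the target core: a minimal
 * target_for_each_device() callback that counts configured devices,
 * following the contract documented above (return 0 to keep iterating,
 * non-zero to stop and propagate the value). The helper names
 * target_count_one_device() and target_count_configured_devices() are
 * made up for this example, so the block is kept compiled out.
 */
#if 0
static int target_count_one_device(struct se_device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* keep iterating over all configured devices */
}

static unsigned int target_count_configured_devices(void)
{
	unsigned int count = 0;

	target_for_each_device(target_count_one_device, &count);
	return count;
}
#endif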