11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2c66ac9dbSNicholas Bellinger /******************************************************************************* 3c66ac9dbSNicholas Bellinger * Filename: target_core_device.c (based on iscsi_target_device.c) 4c66ac9dbSNicholas Bellinger * 5e3d6f909SAndy Grover * This file contains the TCM Virtual Device and Disk Transport 6c66ac9dbSNicholas Bellinger * agnostic related functions. 7c66ac9dbSNicholas Bellinger * 84c76251eSNicholas Bellinger * (c) Copyright 2003-2013 Datera, Inc. 9c66ac9dbSNicholas Bellinger * 10c66ac9dbSNicholas Bellinger * Nicholas A. Bellinger <nab@kernel.org> 11c66ac9dbSNicholas Bellinger * 12c66ac9dbSNicholas Bellinger ******************************************************************************/ 13c66ac9dbSNicholas Bellinger 14c66ac9dbSNicholas Bellinger #include <linux/net.h> 15c66ac9dbSNicholas Bellinger #include <linux/string.h> 16c66ac9dbSNicholas Bellinger #include <linux/delay.h> 17c66ac9dbSNicholas Bellinger #include <linux/timer.h> 18c66ac9dbSNicholas Bellinger #include <linux/slab.h> 19c66ac9dbSNicholas Bellinger #include <linux/spinlock.h> 20c66ac9dbSNicholas Bellinger #include <linux/kthread.h> 21c66ac9dbSNicholas Bellinger #include <linux/in.h> 22c53181afSPaul Gortmaker #include <linux/export.h> 238dcf07beSBart Van Assche #include <linux/t10-pi.h> 247bfea53bSAndy Grover #include <asm/unaligned.h> 25c66ac9dbSNicholas Bellinger #include <net/sock.h> 26c66ac9dbSNicholas Bellinger #include <net/tcp.h> 27ba929992SBart Van Assche #include <scsi/scsi_common.h> 28ba929992SBart Van Assche #include <scsi/scsi_proto.h> 29c66ac9dbSNicholas Bellinger 30c66ac9dbSNicholas Bellinger #include <target/target_core_base.h> 31c4795fb2SChristoph Hellwig #include <target/target_core_backend.h> 32c4795fb2SChristoph Hellwig #include <target/target_core_fabric.h> 33c66ac9dbSNicholas Bellinger 34e26d99aeSChristoph Hellwig #include "target_core_internal.h" 35c66ac9dbSNicholas Bellinger 
#include "target_core_alua.h" 36c66ac9dbSNicholas Bellinger #include "target_core_pr.h" 37c66ac9dbSNicholas Bellinger #include "target_core_ua.h" 38c66ac9dbSNicholas Bellinger 39c82ff239SColin Ian King static DEFINE_MUTEX(device_mutex); 40c82ff239SColin Ian King static LIST_HEAD(device_list); 410a5eee64SMike Christie static DEFINE_IDR(devices_idr); 42d9ea32bfSNicholas Bellinger 43e3d6f909SAndy Grover static struct se_hba *lun0_hba; 44e3d6f909SAndy Grover /* not static, needed by tpg.c */ 45e3d6f909SAndy Grover struct se_device *g_lun0_dev; 46e3d6f909SAndy Grover 47de103c93SChristoph Hellwig sense_reason_t 48a36840d8SSudhakar Panneerselvam transport_lookup_cmd_lun(struct se_cmd *se_cmd) 49c66ac9dbSNicholas Bellinger { 50c66ac9dbSNicholas Bellinger struct se_lun *se_lun = NULL; 51e3d6f909SAndy Grover struct se_session *se_sess = se_cmd->se_sess; 5229a05deeSNicholas Bellinger struct se_node_acl *nacl = se_sess->se_node_acl; 5329a05deeSNicholas Bellinger struct se_dev_entry *deve; 548fa3a867SNicholas Bellinger sense_reason_t ret = TCM_NO_SENSE; 55c66ac9dbSNicholas Bellinger 5629a05deeSNicholas Bellinger rcu_read_lock(); 57a36840d8SSudhakar Panneerselvam deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); 5829a05deeSNicholas Bellinger if (deve) { 5929a05deeSNicholas Bellinger atomic_long_inc(&deve->total_cmds); 60c66ac9dbSNicholas Bellinger 615951146dSAndy Grover if (se_cmd->data_direction == DMA_TO_DEVICE) 6229a05deeSNicholas Bellinger atomic_long_add(se_cmd->data_length, 6329a05deeSNicholas Bellinger &deve->write_bytes); 645951146dSAndy Grover else if (se_cmd->data_direction == DMA_FROM_DEVICE) 6529a05deeSNicholas Bellinger atomic_long_add(se_cmd->data_length, 6629a05deeSNicholas Bellinger &deve->read_bytes); 675951146dSAndy Grover 68a2b5d6f9SMike Christie if ((se_cmd->data_direction == DMA_TO_DEVICE) && 69a2b5d6f9SMike Christie deve->lun_access_ro) { 70a2b5d6f9SMike Christie pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 71a2b5d6f9SMike Christie " Access 
for 0x%08llx\n", 72a2b5d6f9SMike Christie se_cmd->se_tfo->fabric_name, 73a2b5d6f9SMike Christie se_cmd->orig_fe_lun); 74a2b5d6f9SMike Christie rcu_read_unlock(); 75a2b5d6f9SMike Christie return TCM_WRITE_PROTECTED; 76a2b5d6f9SMike Christie } 77a2b5d6f9SMike Christie 78ef4f7e4bSDmitry Bogdanov se_lun = deve->se_lun; 79bd4e2d29SNicholas Bellinger 80bd4e2d29SNicholas Bellinger if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { 81bd4e2d29SNicholas Bellinger se_lun = NULL; 82bd4e2d29SNicholas Bellinger goto out_unlock; 83bd4e2d29SNicholas Bellinger } 84bd4e2d29SNicholas Bellinger 8563f74794SBart Van Assche se_cmd->se_lun = se_lun; 865951146dSAndy Grover se_cmd->pr_res_key = deve->pr_res_key; 875951146dSAndy Grover se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 885277797dSNicholas Bellinger se_cmd->lun_ref_active = true; 895951146dSAndy Grover } 90bd4e2d29SNicholas Bellinger out_unlock: 9129a05deeSNicholas Bellinger rcu_read_unlock(); 925951146dSAndy Grover 935951146dSAndy Grover if (!se_lun) { 94c66ac9dbSNicholas Bellinger /* 95c66ac9dbSNicholas Bellinger * Use the se_portal_group->tpg_virt_lun0 to allow for 96c66ac9dbSNicholas Bellinger * REPORT_LUNS, et al to be returned when no active 97c66ac9dbSNicholas Bellinger * MappedLUN=0 exists for this Initiator Port. 
98c66ac9dbSNicholas Bellinger */ 99a36840d8SSudhakar Panneerselvam if (se_cmd->orig_fe_lun != 0) { 1006708bb27SAndy Grover pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 1015482d56bSLance Digby " Access for 0x%08llx from %s\n", 10230c7ca93SDavid Disseldorp se_cmd->se_tfo->fabric_name, 103a36840d8SSudhakar Panneerselvam se_cmd->orig_fe_lun, 1045482d56bSLance Digby nacl->initiatorname); 105de103c93SChristoph Hellwig return TCM_NON_EXISTENT_LUN; 106c66ac9dbSNicholas Bellinger } 1075951146dSAndy Grover 1088fa3a867SNicholas Bellinger /* 1098fa3a867SNicholas Bellinger * Force WRITE PROTECT for virtual LUN 0 1108fa3a867SNicholas Bellinger */ 1118fa3a867SNicholas Bellinger if ((se_cmd->data_direction != DMA_FROM_DEVICE) && 112a2b5d6f9SMike Christie (se_cmd->data_direction != DMA_NONE)) 113a2b5d6f9SMike Christie return TCM_WRITE_PROTECTED; 114a2b5d6f9SMike Christie 115a2b5d6f9SMike Christie se_lun = se_sess->se_tpg->tpg_virt_lun0; 116a2b5d6f9SMike Christie if (!percpu_ref_tryget_live(&se_lun->lun_ref)) 117a2b5d6f9SMike Christie return TCM_NON_EXISTENT_LUN; 118a2b5d6f9SMike Christie 119a2b5d6f9SMike Christie se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; 120a2b5d6f9SMike Christie se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 121a2b5d6f9SMike Christie se_cmd->lun_ref_active = true; 122c66ac9dbSNicholas Bellinger } 1234cc987eaSNicholas Bellinger /* 1244cc987eaSNicholas Bellinger * RCU reference protected by percpu se_lun->lun_ref taken above that 1254cc987eaSNicholas Bellinger * must drop to zero (including initial reference) before this se_lun 1264cc987eaSNicholas Bellinger * pointer can be kfree_rcu() by the final se_lun->lun_group put via 1274cc987eaSNicholas Bellinger * target_core_fabric_configfs.c:target_fabric_port_release 1284cc987eaSNicholas Bellinger */ 1294cc987eaSNicholas Bellinger se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); 1304cc987eaSNicholas Bellinger atomic_long_inc(&se_cmd->se_dev->num_cmds); 131c66ac9dbSNicholas Bellinger 132c66ac9dbSNicholas 
Bellinger if (se_cmd->data_direction == DMA_TO_DEVICE) 1334cc987eaSNicholas Bellinger atomic_long_add(se_cmd->data_length, 1344cc987eaSNicholas Bellinger &se_cmd->se_dev->write_bytes); 135c66ac9dbSNicholas Bellinger else if (se_cmd->data_direction == DMA_FROM_DEVICE) 1364cc987eaSNicholas Bellinger atomic_long_add(se_cmd->data_length, 1374cc987eaSNicholas Bellinger &se_cmd->se_dev->read_bytes); 138c66ac9dbSNicholas Bellinger 1398fa3a867SNicholas Bellinger return ret; 140c66ac9dbSNicholas Bellinger } 1415951146dSAndy Grover EXPORT_SYMBOL(transport_lookup_cmd_lun); 142c66ac9dbSNicholas Bellinger 143a36840d8SSudhakar Panneerselvam int transport_lookup_tmr_lun(struct se_cmd *se_cmd) 144c66ac9dbSNicholas Bellinger { 145c66ac9dbSNicholas Bellinger struct se_dev_entry *deve; 146c66ac9dbSNicholas Bellinger struct se_lun *se_lun = NULL; 147e3d6f909SAndy Grover struct se_session *se_sess = se_cmd->se_sess; 14829a05deeSNicholas Bellinger struct se_node_acl *nacl = se_sess->se_node_acl; 149c66ac9dbSNicholas Bellinger struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 1505e1be919SRoland Dreier unsigned long flags; 151c66ac9dbSNicholas Bellinger 15229a05deeSNicholas Bellinger rcu_read_lock(); 153a36840d8SSudhakar Panneerselvam deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); 15429a05deeSNicholas Bellinger if (deve) { 155ef4f7e4bSDmitry Bogdanov se_lun = deve->se_lun; 156eeb64d23SNicholas Bellinger 157eeb64d23SNicholas Bellinger if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { 158eeb64d23SNicholas Bellinger se_lun = NULL; 159eeb64d23SNicholas Bellinger goto out_unlock; 160eeb64d23SNicholas Bellinger } 161eeb64d23SNicholas Bellinger 16263f74794SBart Van Assche se_cmd->se_lun = se_lun; 163c66ac9dbSNicholas Bellinger se_cmd->pr_res_key = deve->pr_res_key; 164eeb64d23SNicholas Bellinger se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 165eeb64d23SNicholas Bellinger se_cmd->lun_ref_active = true; 166c66ac9dbSNicholas Bellinger } 167eeb64d23SNicholas Bellinger out_unlock: 
16829a05deeSNicholas Bellinger rcu_read_unlock(); 169c66ac9dbSNicholas Bellinger 170c66ac9dbSNicholas Bellinger if (!se_lun) { 1716708bb27SAndy Grover pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 1725482d56bSLance Digby " Access for 0x%08llx for %s\n", 17330c7ca93SDavid Disseldorp se_cmd->se_tfo->fabric_name, 174a36840d8SSudhakar Panneerselvam se_cmd->orig_fe_lun, 1755482d56bSLance Digby nacl->initiatorname); 176e3d6f909SAndy Grover return -ENODEV; 177c66ac9dbSNicholas Bellinger } 1784cc987eaSNicholas Bellinger se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); 1794cc987eaSNicholas Bellinger se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); 1805951146dSAndy Grover 1815e1be919SRoland Dreier spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); 1825951146dSAndy Grover list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); 1835e1be919SRoland Dreier spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); 184c66ac9dbSNicholas Bellinger 185c66ac9dbSNicholas Bellinger return 0; 186c66ac9dbSNicholas Bellinger } 1875951146dSAndy Grover EXPORT_SYMBOL(transport_lookup_tmr_lun); 188c66ac9dbSNicholas Bellinger 18929a05deeSNicholas Bellinger bool target_lun_is_rdonly(struct se_cmd *cmd) 19029a05deeSNicholas Bellinger { 19129a05deeSNicholas Bellinger struct se_session *se_sess = cmd->se_sess; 19229a05deeSNicholas Bellinger struct se_dev_entry *deve; 19329a05deeSNicholas Bellinger bool ret; 19429a05deeSNicholas Bellinger 19529a05deeSNicholas Bellinger rcu_read_lock(); 19629a05deeSNicholas Bellinger deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun); 19703a68b44SAndy Grover ret = deve && deve->lun_access_ro; 19829a05deeSNicholas Bellinger rcu_read_unlock(); 19929a05deeSNicholas Bellinger 20029a05deeSNicholas Bellinger return ret; 20129a05deeSNicholas Bellinger } 20229a05deeSNicholas Bellinger EXPORT_SYMBOL(target_lun_is_rdonly); 20329a05deeSNicholas Bellinger 204c66ac9dbSNicholas Bellinger /* 205c66ac9dbSNicholas 
Bellinger * This function is called from core_scsi3_emulate_pro_register_and_move() 20629a05deeSNicholas Bellinger * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref 207c66ac9dbSNicholas Bellinger * when a matching rtpi is found. 208c66ac9dbSNicholas Bellinger */ 209c66ac9dbSNicholas Bellinger struct se_dev_entry *core_get_se_deve_from_rtpi( 210c66ac9dbSNicholas Bellinger struct se_node_acl *nacl, 211c66ac9dbSNicholas Bellinger u16 rtpi) 212c66ac9dbSNicholas Bellinger { 213c66ac9dbSNicholas Bellinger struct se_dev_entry *deve; 214c66ac9dbSNicholas Bellinger struct se_lun *lun; 215c66ac9dbSNicholas Bellinger struct se_portal_group *tpg = nacl->se_tpg; 216c66ac9dbSNicholas Bellinger 21729a05deeSNicholas Bellinger rcu_read_lock(); 21829a05deeSNicholas Bellinger hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { 219ef4f7e4bSDmitry Bogdanov lun = deve->se_lun; 2206708bb27SAndy Grover if (!lun) { 2216708bb27SAndy Grover pr_err("%s device entries device pointer is" 222c66ac9dbSNicholas Bellinger " NULL, but Initiator has access.\n", 22330c7ca93SDavid Disseldorp tpg->se_tpg_tfo->fabric_name); 224c66ac9dbSNicholas Bellinger continue; 225c66ac9dbSNicholas Bellinger } 22629a05deeSNicholas Bellinger if (lun->lun_rtpi != rtpi) 227c66ac9dbSNicholas Bellinger continue; 228c66ac9dbSNicholas Bellinger 22929a05deeSNicholas Bellinger kref_get(&deve->pr_kref); 23029a05deeSNicholas Bellinger rcu_read_unlock(); 231c66ac9dbSNicholas Bellinger 232c66ac9dbSNicholas Bellinger return deve; 233c66ac9dbSNicholas Bellinger } 23429a05deeSNicholas Bellinger rcu_read_unlock(); 235c66ac9dbSNicholas Bellinger 236c66ac9dbSNicholas Bellinger return NULL; 237c66ac9dbSNicholas Bellinger } 238c66ac9dbSNicholas Bellinger 23929a05deeSNicholas Bellinger void core_free_device_list_for_node( 240c66ac9dbSNicholas Bellinger struct se_node_acl *nacl, 241c66ac9dbSNicholas Bellinger struct se_portal_group *tpg) 242c66ac9dbSNicholas Bellinger { 243c66ac9dbSNicholas Bellinger struct 
se_dev_entry *deve; 244c66ac9dbSNicholas Bellinger 24529a05deeSNicholas Bellinger mutex_lock(&nacl->lun_entry_mutex); 246ef4f7e4bSDmitry Bogdanov hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) 247ef4f7e4bSDmitry Bogdanov core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg); 24829a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 249c66ac9dbSNicholas Bellinger } 250c66ac9dbSNicholas Bellinger 251c66ac9dbSNicholas Bellinger void core_update_device_list_access( 252f2d30680SHannes Reinecke u64 mapped_lun, 25303a68b44SAndy Grover bool lun_access_ro, 254c66ac9dbSNicholas Bellinger struct se_node_acl *nacl) 255c66ac9dbSNicholas Bellinger { 256c66ac9dbSNicholas Bellinger struct se_dev_entry *deve; 257c66ac9dbSNicholas Bellinger 25829a05deeSNicholas Bellinger mutex_lock(&nacl->lun_entry_mutex); 25929a05deeSNicholas Bellinger deve = target_nacl_find_deve(nacl, mapped_lun); 26003a68b44SAndy Grover if (deve) 26103a68b44SAndy Grover deve->lun_access_ro = lun_access_ro; 26229a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 263c66ac9dbSNicholas Bellinger } 264c66ac9dbSNicholas Bellinger 26529a05deeSNicholas Bellinger /* 26629a05deeSNicholas Bellinger * Called with rcu_read_lock or nacl->device_list_lock held. 
267c66ac9dbSNicholas Bellinger */ 268f2d30680SHannes Reinecke struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun) 26929a05deeSNicholas Bellinger { 27029a05deeSNicholas Bellinger struct se_dev_entry *deve; 27129a05deeSNicholas Bellinger 27229a05deeSNicholas Bellinger hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) 27329a05deeSNicholas Bellinger if (deve->mapped_lun == mapped_lun) 27429a05deeSNicholas Bellinger return deve; 27529a05deeSNicholas Bellinger 27629a05deeSNicholas Bellinger return NULL; 27729a05deeSNicholas Bellinger } 27829a05deeSNicholas Bellinger EXPORT_SYMBOL(target_nacl_find_deve); 27929a05deeSNicholas Bellinger 28029a05deeSNicholas Bellinger void target_pr_kref_release(struct kref *kref) 28129a05deeSNicholas Bellinger { 28229a05deeSNicholas Bellinger struct se_dev_entry *deve = container_of(kref, struct se_dev_entry, 28329a05deeSNicholas Bellinger pr_kref); 28429a05deeSNicholas Bellinger complete(&deve->pr_comp); 285c66ac9dbSNicholas Bellinger } 286c66ac9dbSNicholas Bellinger 2877c0d0d51SHannes Reinecke static void 2887c0d0d51SHannes Reinecke target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new, 2897c0d0d51SHannes Reinecke bool skip_new) 2907c0d0d51SHannes Reinecke { 2917c0d0d51SHannes Reinecke struct se_dev_entry *tmp; 2927c0d0d51SHannes Reinecke 2937c0d0d51SHannes Reinecke rcu_read_lock(); 2947c0d0d51SHannes Reinecke hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) { 2957c0d0d51SHannes Reinecke if (skip_new && tmp == new) 2967c0d0d51SHannes Reinecke continue; 2977c0d0d51SHannes Reinecke core_scsi3_ua_allocate(tmp, 0x3F, 2987c0d0d51SHannes Reinecke ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED); 2997c0d0d51SHannes Reinecke } 3007c0d0d51SHannes Reinecke rcu_read_unlock(); 3017c0d0d51SHannes Reinecke } 3027c0d0d51SHannes Reinecke 303e80ac6c4SAndy Grover int core_enable_device_list_for_node( 304c66ac9dbSNicholas Bellinger struct se_lun *lun, 305c66ac9dbSNicholas Bellinger 
struct se_lun_acl *lun_acl, 306f2d30680SHannes Reinecke u64 mapped_lun, 30703a68b44SAndy Grover bool lun_access_ro, 308c66ac9dbSNicholas Bellinger struct se_node_acl *nacl, 309e80ac6c4SAndy Grover struct se_portal_group *tpg) 310c66ac9dbSNicholas Bellinger { 31129a05deeSNicholas Bellinger struct se_dev_entry *orig, *new; 312c66ac9dbSNicholas Bellinger 31329a05deeSNicholas Bellinger new = kzalloc(sizeof(*new), GFP_KERNEL); 31429a05deeSNicholas Bellinger if (!new) { 31529a05deeSNicholas Bellinger pr_err("Unable to allocate se_dev_entry memory\n"); 31629a05deeSNicholas Bellinger return -ENOMEM; 31729a05deeSNicholas Bellinger } 318e80ac6c4SAndy Grover 31929a05deeSNicholas Bellinger spin_lock_init(&new->ua_lock); 32029a05deeSNicholas Bellinger INIT_LIST_HEAD(&new->ua_list); 321adf653f9SChristoph Hellwig INIT_LIST_HEAD(&new->lun_link); 322e80ac6c4SAndy Grover 32329a05deeSNicholas Bellinger new->mapped_lun = mapped_lun; 32429a05deeSNicholas Bellinger kref_init(&new->pr_kref); 32529a05deeSNicholas Bellinger init_completion(&new->pr_comp); 32629a05deeSNicholas Bellinger 32703a68b44SAndy Grover new->lun_access_ro = lun_access_ro; 32829a05deeSNicholas Bellinger new->creation_time = get_jiffies_64(); 32929a05deeSNicholas Bellinger new->attach_count++; 33029a05deeSNicholas Bellinger 33129a05deeSNicholas Bellinger mutex_lock(&nacl->lun_entry_mutex); 33229a05deeSNicholas Bellinger orig = target_nacl_find_deve(nacl, mapped_lun); 33329a05deeSNicholas Bellinger if (orig && orig->se_lun) { 334ef4f7e4bSDmitry Bogdanov struct se_lun *orig_lun = orig->se_lun; 33529a05deeSNicholas Bellinger 33629a05deeSNicholas Bellinger if (orig_lun != lun) { 33729a05deeSNicholas Bellinger pr_err("Existing orig->se_lun doesn't match new lun" 33829a05deeSNicholas Bellinger " for dynamic -> explicit NodeACL conversion:" 33929a05deeSNicholas Bellinger " %s\n", nacl->initiatorname); 34029a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 34129a05deeSNicholas Bellinger kfree(new); 
342e3d6f909SAndy Grover return -EINVAL; 343c66ac9dbSNicholas Bellinger } 344391e2a6dSNicholas Bellinger if (orig->se_lun_acl != NULL) { 345391e2a6dSNicholas Bellinger pr_warn_ratelimited("Detected existing explicit" 346391e2a6dSNicholas Bellinger " se_lun_acl->se_lun_group reference for %s" 347391e2a6dSNicholas Bellinger " mapped_lun: %llu, failing\n", 348391e2a6dSNicholas Bellinger nacl->initiatorname, mapped_lun); 349391e2a6dSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 350391e2a6dSNicholas Bellinger kfree(new); 351391e2a6dSNicholas Bellinger return -EINVAL; 352391e2a6dSNicholas Bellinger } 353c66ac9dbSNicholas Bellinger 354ef4f7e4bSDmitry Bogdanov new->se_lun = lun; 355ef4f7e4bSDmitry Bogdanov new->se_lun_acl = lun_acl; 35629a05deeSNicholas Bellinger hlist_del_rcu(&orig->link); 35729a05deeSNicholas Bellinger hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); 35829a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 359c66ac9dbSNicholas Bellinger 3601adff1b3SNicholas Bellinger spin_lock(&lun->lun_deve_lock); 361adf653f9SChristoph Hellwig list_del(&orig->lun_link); 362adf653f9SChristoph Hellwig list_add_tail(&new->lun_link, &lun->lun_deve_list); 3631adff1b3SNicholas Bellinger spin_unlock(&lun->lun_deve_lock); 36429a05deeSNicholas Bellinger 36529a05deeSNicholas Bellinger kref_put(&orig->pr_kref, target_pr_kref_release); 36629a05deeSNicholas Bellinger wait_for_completion(&orig->pr_comp); 36729a05deeSNicholas Bellinger 3687c0d0d51SHannes Reinecke target_luns_data_has_changed(nacl, new, true); 36929a05deeSNicholas Bellinger kfree_rcu(orig, rcu_head); 370c66ac9dbSNicholas Bellinger return 0; 371c66ac9dbSNicholas Bellinger } 372e80ac6c4SAndy Grover 373ef4f7e4bSDmitry Bogdanov new->se_lun = lun; 374ef4f7e4bSDmitry Bogdanov new->se_lun_acl = lun_acl; 37529a05deeSNicholas Bellinger hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); 37629a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 377c66ac9dbSNicholas Bellinger 
3781adff1b3SNicholas Bellinger spin_lock(&lun->lun_deve_lock); 379adf653f9SChristoph Hellwig list_add_tail(&new->lun_link, &lun->lun_deve_list); 3801adff1b3SNicholas Bellinger spin_unlock(&lun->lun_deve_lock); 381c66ac9dbSNicholas Bellinger 3827c0d0d51SHannes Reinecke target_luns_data_has_changed(nacl, new, true); 383c66ac9dbSNicholas Bellinger return 0; 384c66ac9dbSNicholas Bellinger } 385c66ac9dbSNicholas Bellinger 38629a05deeSNicholas Bellinger void core_disable_device_list_for_node( 387e80ac6c4SAndy Grover struct se_lun *lun, 38829a05deeSNicholas Bellinger struct se_dev_entry *orig, 389e80ac6c4SAndy Grover struct se_node_acl *nacl, 390e80ac6c4SAndy Grover struct se_portal_group *tpg) 391e80ac6c4SAndy Grover { 392e80ac6c4SAndy Grover /* 3934cc987eaSNicholas Bellinger * rcu_dereference_raw protected by se_lun->lun_group symlink 3944cc987eaSNicholas Bellinger * reference to se_device->dev_group. 3954cc987eaSNicholas Bellinger */ 3964cc987eaSNicholas Bellinger struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 397618baaf7SBart Van Assche 398618baaf7SBart Van Assche lockdep_assert_held(&nacl->lun_entry_mutex); 399618baaf7SBart Van Assche 400e80ac6c4SAndy Grover /* 401e80ac6c4SAndy Grover * If the MappedLUN entry is being disabled, the entry in 402adf653f9SChristoph Hellwig * lun->lun_deve_list must be removed now before clearing the 403e80ac6c4SAndy Grover * struct se_dev_entry pointers below as logic in 404e80ac6c4SAndy Grover * core_alua_do_transition_tg_pt() depends on these being present. 405e80ac6c4SAndy Grover * 406e80ac6c4SAndy Grover * deve->se_lun_acl will be NULL for demo-mode created LUNs 407e80ac6c4SAndy Grover * that have not been explicitly converted to MappedLUNs -> 408adf653f9SChristoph Hellwig * struct se_lun_acl, but we remove deve->lun_link from 409adf653f9SChristoph Hellwig * lun->lun_deve_list. 
This also means that active UAs and 410e80ac6c4SAndy Grover * NodeACL context specific PR metadata for demo-mode 411e80ac6c4SAndy Grover * MappedLUN *deve will be released below.. 412e80ac6c4SAndy Grover */ 4131adff1b3SNicholas Bellinger spin_lock(&lun->lun_deve_lock); 414adf653f9SChristoph Hellwig list_del(&orig->lun_link); 4151adff1b3SNicholas Bellinger spin_unlock(&lun->lun_deve_lock); 416c66ac9dbSNicholas Bellinger /* 417c66ac9dbSNicholas Bellinger * Disable struct se_dev_entry LUN ACL mapping 418c66ac9dbSNicholas Bellinger */ 41929a05deeSNicholas Bellinger core_scsi3_ua_release_all(orig); 420c66ac9dbSNicholas Bellinger 42129a05deeSNicholas Bellinger hlist_del_rcu(&orig->link); 42280bfdfa9SNicholas Bellinger clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); 42303a68b44SAndy Grover orig->lun_access_ro = false; 42429a05deeSNicholas Bellinger orig->creation_time = 0; 42529a05deeSNicholas Bellinger orig->attach_count--; 42629a05deeSNicholas Bellinger /* 42729a05deeSNicholas Bellinger * Before firing off RCU callback, wait for any in process SPEC_I_PT=1 42829a05deeSNicholas Bellinger * or REGISTER_AND_MOVE PR operation to complete. 
42929a05deeSNicholas Bellinger */ 43029a05deeSNicholas Bellinger kref_put(&orig->pr_kref, target_pr_kref_release); 43129a05deeSNicholas Bellinger wait_for_completion(&orig->pr_comp); 43229a05deeSNicholas Bellinger 43329a05deeSNicholas Bellinger kfree_rcu(orig, rcu_head); 434c66ac9dbSNicholas Bellinger 4354cc987eaSNicholas Bellinger core_scsi3_free_pr_reg_from_nacl(dev, nacl); 4367c0d0d51SHannes Reinecke target_luns_data_has_changed(nacl, NULL, false); 437c66ac9dbSNicholas Bellinger } 438c66ac9dbSNicholas Bellinger 439c66ac9dbSNicholas Bellinger /* core_clear_lun_from_tpg(): 440c66ac9dbSNicholas Bellinger * 441c66ac9dbSNicholas Bellinger * 442c66ac9dbSNicholas Bellinger */ 443c66ac9dbSNicholas Bellinger void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) 444c66ac9dbSNicholas Bellinger { 445c66ac9dbSNicholas Bellinger struct se_node_acl *nacl; 446c66ac9dbSNicholas Bellinger struct se_dev_entry *deve; 447c66ac9dbSNicholas Bellinger 448403edd78SNicholas Bellinger mutex_lock(&tpg->acl_node_mutex); 449c66ac9dbSNicholas Bellinger list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 450c66ac9dbSNicholas Bellinger 45129a05deeSNicholas Bellinger mutex_lock(&nacl->lun_entry_mutex); 45229a05deeSNicholas Bellinger hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { 453ef4f7e4bSDmitry Bogdanov if (lun != deve->se_lun) 454c66ac9dbSNicholas Bellinger continue; 455c66ac9dbSNicholas Bellinger 45629a05deeSNicholas Bellinger core_disable_device_list_for_node(lun, deve, nacl, tpg); 457c66ac9dbSNicholas Bellinger } 45829a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 459c66ac9dbSNicholas Bellinger } 460403edd78SNicholas Bellinger mutex_unlock(&tpg->acl_node_mutex); 461c66ac9dbSNicholas Bellinger } 462c66ac9dbSNicholas Bellinger 463adf653f9SChristoph Hellwig int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev) 464c66ac9dbSNicholas Bellinger { 465adf653f9SChristoph Hellwig struct se_lun *tmp; 466c66ac9dbSNicholas 
Bellinger 467c66ac9dbSNicholas Bellinger spin_lock(&dev->se_port_lock); 468adf653f9SChristoph Hellwig if (dev->export_count == 0x0000ffff) { 4696708bb27SAndy Grover pr_warn("Reached dev->dev_port_count ==" 470c66ac9dbSNicholas Bellinger " 0x0000ffff\n"); 471c66ac9dbSNicholas Bellinger spin_unlock(&dev->se_port_lock); 472adf653f9SChristoph Hellwig return -ENOSPC; 473c66ac9dbSNicholas Bellinger } 474c66ac9dbSNicholas Bellinger again: 475c66ac9dbSNicholas Bellinger /* 47635d1efe8SMasanari Iida * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device 477c66ac9dbSNicholas Bellinger * Here is the table from spc4r17 section 7.7.3.8. 478c66ac9dbSNicholas Bellinger * 479c66ac9dbSNicholas Bellinger * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field 480c66ac9dbSNicholas Bellinger * 481c66ac9dbSNicholas Bellinger * Code Description 482c66ac9dbSNicholas Bellinger * 0h Reserved 483c66ac9dbSNicholas Bellinger * 1h Relative port 1, historically known as port A 484c66ac9dbSNicholas Bellinger * 2h Relative port 2, historically known as port B 485c66ac9dbSNicholas Bellinger * 3h to FFFFh Relative port 3 through 65 535 486c66ac9dbSNicholas Bellinger */ 487adf653f9SChristoph Hellwig lun->lun_rtpi = dev->dev_rpti_counter++; 488adf653f9SChristoph Hellwig if (!lun->lun_rtpi) 489c66ac9dbSNicholas Bellinger goto again; 490c66ac9dbSNicholas Bellinger 491adf653f9SChristoph Hellwig list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) { 492c66ac9dbSNicholas Bellinger /* 49335d1efe8SMasanari Iida * Make sure RELATIVE TARGET PORT IDENTIFIER is unique 494c66ac9dbSNicholas Bellinger * for 16-bit wrap.. 
495c66ac9dbSNicholas Bellinger */ 496adf653f9SChristoph Hellwig if (lun->lun_rtpi == tmp->lun_rtpi) 497c66ac9dbSNicholas Bellinger goto again; 498c66ac9dbSNicholas Bellinger } 499c66ac9dbSNicholas Bellinger spin_unlock(&dev->se_port_lock); 500c66ac9dbSNicholas Bellinger 501c66ac9dbSNicholas Bellinger return 0; 502c66ac9dbSNicholas Bellinger } 503c66ac9dbSNicholas Bellinger 5040fd97ccfSChristoph Hellwig static void se_release_vpd_for_dev(struct se_device *dev) 505c66ac9dbSNicholas Bellinger { 506c66ac9dbSNicholas Bellinger struct t10_vpd *vpd, *vpd_tmp; 507c66ac9dbSNicholas Bellinger 5080fd97ccfSChristoph Hellwig spin_lock(&dev->t10_wwn.t10_vpd_lock); 509c66ac9dbSNicholas Bellinger list_for_each_entry_safe(vpd, vpd_tmp, 5100fd97ccfSChristoph Hellwig &dev->t10_wwn.t10_vpd_list, vpd_list) { 511c66ac9dbSNicholas Bellinger list_del(&vpd->vpd_list); 512c66ac9dbSNicholas Bellinger kfree(vpd); 513c66ac9dbSNicholas Bellinger } 5140fd97ccfSChristoph Hellwig spin_unlock(&dev->t10_wwn.t10_vpd_lock); 515c66ac9dbSNicholas Bellinger } 516c66ac9dbSNicholas Bellinger 517c8045372SRoland Dreier static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 518525a48a2SNicholas Bellinger { 5193e03989bSRoland Dreier u32 aligned_max_sectors; 5203e03989bSRoland Dreier u32 alignment; 521525a48a2SNicholas Bellinger /* 522525a48a2SNicholas Bellinger * Limit max_sectors to a PAGE_SIZE aligned value for modern 523525a48a2SNicholas Bellinger * transport_allocate_data_tasks() operation. 
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

/*
 * core_dev_add_lun - activate @lun for @dev inside @tpg.
 *
 * Registers the LUN via core_tpg_add_lun() and, when the fabric runs in
 * demo mode, maps the new LUN into every dynamically generated node ACL.
 * Returns 0 on success or the negative errno from core_tpg_add_lun().
 */
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	/* false: LUN starts out read-write; access mode is adjusted per-ACL */
	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			/*
			 * Skip explicit (non-dynamic) ACLs, and skip everything
			 * when the fabric restricts demo mode to login only.
			 */
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

/*
 * Allocate a struct se_lun_acl binding @nacl to @mapped_lun.
 *
 * On failure returns NULL and stores a negative errno in *@ret
 * (-EOVERFLOW for an oversized initiator name, -ENOMEM on allocation
 * failure).  The returned ACL is not yet linked to a LUN; callers do
 * that via core_dev_add_initiator_node_lun_acl().
 */
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

/*
 * Attach a previously initialized LUN ACL to @lun and enable the mapped
 * LUN in the initiator's device list.  Also re-activates any matching
 * APTPL persistent reservation pre-registrations.  Returns 0 or -EINVAL.
 */
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	/* A read-only LUN forces the mapped LUN read-only as well. */
	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
		lacl->mapped_lun);
	return 0;
}

/*
 * Disable the mapped-LUN entry backing @lacl for its node ACL, if one
 * is still present.  Returns 0, or -EINVAL if the ACL has no node.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

/* Log and free a LUN ACL previously detached from its LUN. */
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 68530c7ca93SDavid Disseldorp " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name, 686e3d6f909SAndy Grover tpg->se_tpg_tfo->tpg_get_tag(tpg), 68730c7ca93SDavid Disseldorp tpg->se_tpg_tfo->fabric_name, 688b6a54b8dSChris Zankel lacl->se_lun_nacl->initiatorname, lacl->mapped_lun); 689c66ac9dbSNicholas Bellinger 690c66ac9dbSNicholas Bellinger kfree(lacl); 691c66ac9dbSNicholas Bellinger } 692c66ac9dbSNicholas Bellinger 6930fd97ccfSChristoph Hellwig static void scsi_dump_inquiry(struct se_device *dev) 6940fd97ccfSChristoph Hellwig { 6950fd97ccfSChristoph Hellwig struct t10_wwn *wwn = &dev->t10_wwn; 696b2da4abfSDavid Disseldorp int device_type = dev->transport->get_device_type(dev); 697b2da4abfSDavid Disseldorp 6980fd97ccfSChristoph Hellwig /* 6990fd97ccfSChristoph Hellwig * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 7000fd97ccfSChristoph Hellwig */ 701b2da4abfSDavid Disseldorp pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n", 702b2da4abfSDavid Disseldorp wwn->vendor); 703b2da4abfSDavid Disseldorp pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n", 704b2da4abfSDavid Disseldorp wwn->model); 705b2da4abfSDavid Disseldorp pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n", 706b2da4abfSDavid Disseldorp wwn->revision); 7070fd97ccfSChristoph Hellwig pr_debug(" Type: %s ", scsi_device_type(device_type)); 7080fd97ccfSChristoph Hellwig } 7090fd97ccfSChristoph Hellwig 7100fd97ccfSChristoph Hellwig struct se_device *target_alloc_device(struct se_hba *hba, const char *name) 7110fd97ccfSChristoph Hellwig { 7120fd97ccfSChristoph Hellwig struct se_device *dev; 7134863e525SNicholas Bellinger struct se_lun *xcopy_lun; 7141526d9f1SMike Christie int i; 7150fd97ccfSChristoph Hellwig 7160a06d430SChristoph Hellwig dev = hba->backend->ops->alloc_device(hba, name); 7170fd97ccfSChristoph Hellwig if (!dev) 7180fd97ccfSChristoph Hellwig return NULL; 7190fd97ccfSChristoph Hellwig 
7201526d9f1SMike Christie dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); 7211526d9f1SMike Christie if (!dev->queues) { 7221526d9f1SMike Christie dev->transport->free_device(dev); 7231526d9f1SMike Christie return NULL; 7241526d9f1SMike Christie } 7251526d9f1SMike Christie 7261526d9f1SMike Christie dev->queue_cnt = nr_cpu_ids; 7271526d9f1SMike Christie for (i = 0; i < dev->queue_cnt; i++) { 728eb44ce8cSMike Christie struct se_device_queue *q; 729eb44ce8cSMike Christie 730eb44ce8cSMike Christie q = &dev->queues[i]; 731eb44ce8cSMike Christie INIT_LIST_HEAD(&q->state_list); 732eb44ce8cSMike Christie spin_lock_init(&q->lock); 733eb44ce8cSMike Christie 734eb44ce8cSMike Christie init_llist_head(&q->sq.cmd_list); 735eb44ce8cSMike Christie INIT_WORK(&q->sq.work, target_queued_submit_work); 7361526d9f1SMike Christie } 7371526d9f1SMike Christie 7380fd97ccfSChristoph Hellwig dev->se_hba = hba; 7390a06d430SChristoph Hellwig dev->transport = hba->backend->ops; 74069088a04SBodo Stroesser dev->transport_flags = dev->transport->transport_flags_default; 741fe052a18SSagi Grimberg dev->prot_length = sizeof(struct t10_pi_tuple); 7424cc987eaSNicholas Bellinger dev->hba_index = hba->hba_index; 7430fd97ccfSChristoph Hellwig 7440fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->dev_sep_list); 7450fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->dev_tmr_list); 7460fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->delayed_cmd_list); 7470fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->qf_cmd_list); 7480fd97ccfSChristoph Hellwig spin_lock_init(&dev->delayed_cmd_lock); 7490fd97ccfSChristoph Hellwig spin_lock_init(&dev->dev_reservation_lock); 7500fd97ccfSChristoph Hellwig spin_lock_init(&dev->se_port_lock); 7510fd97ccfSChristoph Hellwig spin_lock_init(&dev->se_tmr_lock); 7520fd97ccfSChristoph Hellwig spin_lock_init(&dev->qf_cmd_lock); 75368ff9b9bSNicholas Bellinger sema_init(&dev->caw_sem, 1); 7540fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 
7550fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_wwn.t10_vpd_lock); 7560fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_pr.registration_list); 7570fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); 7580fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_pr.registration_lock); 7590fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 7600fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 7610fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 762c66094bfSHannes Reinecke INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); 763c66094bfSHannes Reinecke spin_lock_init(&dev->t10_alua.lba_map_lock); 7640fd97ccfSChristoph Hellwig 765ed1227e0SMike Christie INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); 766ed1227e0SMike Christie 7670fd97ccfSChristoph Hellwig dev->t10_wwn.t10_dev = dev; 7682469f1e0SSergey Samoylenko /* 7692469f1e0SSergey Samoylenko * Use OpenFabrics IEEE Company ID: 00 14 05 7702469f1e0SSergey Samoylenko */ 7712469f1e0SSergey Samoylenko dev->t10_wwn.company_id = 0x001405; 7722469f1e0SSergey Samoylenko 7730fd97ccfSChristoph Hellwig dev->t10_alua.t10_dev = dev; 7740fd97ccfSChristoph Hellwig 7750fd97ccfSChristoph Hellwig dev->dev_attrib.da_dev = dev; 776adfa9570STregaron Bayly dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; 777814e5b45SChristoph Hellwig dev->dev_attrib.emulate_dpo = 1; 778814e5b45SChristoph Hellwig dev->dev_attrib.emulate_fua_write = 1; 779814e5b45SChristoph Hellwig dev->dev_attrib.emulate_fua_read = 1; 7800fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 7811bf630fdSDavid Disseldorp dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR; 7820fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; 7830fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; 7840fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 7850123a9ecSNicholas 
Bellinger dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 786d397a445SNicholas Bellinger dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 787b49d6f78SDavid Disseldorp dev->dev_attrib.emulate_pr = DA_EMULATE_PR; 788*bd217b8cSDmitry Bogdanov dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC; 7892ed22c9cSNicholas Bellinger dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 7900fd97ccfSChristoph Hellwig dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 79192404e60SNicholas Bellinger dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; 7920fd97ccfSChristoph Hellwig dev->dev_attrib.is_nonrot = DA_IS_NONROT; 7930fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 7940fd97ccfSChristoph Hellwig dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 7950fd97ccfSChristoph Hellwig dev->dev_attrib.max_unmap_block_desc_count = 7960fd97ccfSChristoph Hellwig DA_MAX_UNMAP_BLOCK_DESC_COUNT; 7970fd97ccfSChristoph Hellwig dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 7980fd97ccfSChristoph Hellwig dev->dev_attrib.unmap_granularity_alignment = 7990fd97ccfSChristoph Hellwig DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 800e6f41633SJamie Pocas dev->dev_attrib.unmap_zeroes_data = 801e6f41633SJamie Pocas DA_UNMAP_ZEROES_DATA_DEFAULT; 802773cbaf7SNicholas Bellinger dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 8030fd97ccfSChristoph Hellwig 8044863e525SNicholas Bellinger xcopy_lun = &dev->xcopy_lun; 8054cc987eaSNicholas Bellinger rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); 806bd4e2d29SNicholas Bellinger init_completion(&xcopy_lun->lun_shutdown_comp); 807adf653f9SChristoph Hellwig INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); 808adf653f9SChristoph Hellwig INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); 809adf653f9SChristoph Hellwig mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); 810adf653f9SChristoph Hellwig xcopy_lun->lun_tpg = &xcopy_pt_tpg; 8114863e525SNicholas Bellinger 8123beeabd5SDavid Disseldorp /* Preload the default INQUIRY 
const values */ 8133beeabd5SDavid Disseldorp strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); 8143beeabd5SDavid Disseldorp strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, 8153beeabd5SDavid Disseldorp sizeof(dev->t10_wwn.model)); 8163beeabd5SDavid Disseldorp strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, 8173beeabd5SDavid Disseldorp sizeof(dev->t10_wwn.revision)); 8183beeabd5SDavid Disseldorp 8190fd97ccfSChristoph Hellwig return dev; 8200fd97ccfSChristoph Hellwig } 8210fd97ccfSChristoph Hellwig 8228a9ebe71SMike Christie /* 823cf0fbf89SChristoph Hellwig * Check if the underlying struct block_device supports discard and if yes 824cf0fbf89SChristoph Hellwig * configure the UNMAP parameters. 8258a9ebe71SMike Christie */ 8268a9ebe71SMike Christie bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, 827817e8b51SChristoph Hellwig struct block_device *bdev) 8288a9ebe71SMike Christie { 829817e8b51SChristoph Hellwig int block_size = bdev_logical_block_size(bdev); 830ea263c7fSMike Christie 83170200574SChristoph Hellwig if (!bdev_max_discard_sectors(bdev)) 8328a9ebe71SMike Christie return false; 8338a9ebe71SMike Christie 834ea263c7fSMike Christie attrib->max_unmap_lba_count = 835cf0fbf89SChristoph Hellwig bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9); 8368a9ebe71SMike Christie /* 8378a9ebe71SMike Christie * Currently hardcoded to 1 in Linux/SCSI code.. 
8388a9ebe71SMike Christie */ 8398a9ebe71SMike Christie attrib->max_unmap_block_desc_count = 1; 8407b47ef52SChristoph Hellwig attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size; 841968786b9SChristoph Hellwig attrib->unmap_granularity_alignment = 842968786b9SChristoph Hellwig bdev_discard_alignment(bdev) / block_size; 8438a9ebe71SMike Christie return true; 8448a9ebe71SMike Christie } 8458a9ebe71SMike Christie EXPORT_SYMBOL(target_configure_unmap_from_queue); 8468a9ebe71SMike Christie 8478a9ebe71SMike Christie /* 8488a9ebe71SMike Christie * Convert from blocksize advertised to the initiator to the 512 byte 8498a9ebe71SMike Christie * units unconditionally used by the Linux block layer. 8508a9ebe71SMike Christie */ 8518a9ebe71SMike Christie sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) 8528a9ebe71SMike Christie { 8538a9ebe71SMike Christie switch (dev->dev_attrib.block_size) { 8548a9ebe71SMike Christie case 4096: 8558a9ebe71SMike Christie return lb << 3; 8568a9ebe71SMike Christie case 2048: 8578a9ebe71SMike Christie return lb << 2; 8588a9ebe71SMike Christie case 1024: 8598a9ebe71SMike Christie return lb << 1; 8608a9ebe71SMike Christie default: 8618a9ebe71SMike Christie return lb; 8628a9ebe71SMike Christie } 8638a9ebe71SMike Christie } 8648a9ebe71SMike Christie EXPORT_SYMBOL(target_to_linux_sector); 8658a9ebe71SMike Christie 866b1943fd4SMike Christie struct devices_idr_iter { 86736d4cb46SBart Van Assche struct config_item *prev_item; 868b1943fd4SMike Christie int (*fn)(struct se_device *dev, void *data); 869b1943fd4SMike Christie void *data; 870b1943fd4SMike Christie }; 871b1943fd4SMike Christie 872b1943fd4SMike Christie static int target_devices_idr_iter(int id, void *p, void *data) 87336d4cb46SBart Van Assche __must_hold(&device_mutex) 874b1943fd4SMike Christie { 875b1943fd4SMike Christie struct devices_idr_iter *iter = data; 876b1943fd4SMike Christie struct se_device *dev = p; 87736d4cb46SBart Van Assche int ret; 

	/* Drop the reference taken for the previous device, if any. */
	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	/*
	 * Pin the device's config item so it cannot be freed while
	 * device_mutex is dropped around the callback below.
	 */
	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	/* Release the last device pinned by the iterator, if any. */
	config_item_put(iter.prev_item);
	return ret;
}

/*
 * Finish bringing up a device allocated by target_alloc_device():
 * publish it in the devices idr, run the backend's configure_device()
 * callback, derive block/queue attributes and set DF_CONFIGURED.
 * Returns 0 on success or a negative errno, undoing partial setup.
 */
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	/*
	 * Add early so modules like tcmu can use during its
	 * configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic to try and avoid collisions with devices
	 * that were recently removed.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	/* Probe discard support; non-zero means probing declined/disabled it. */
	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

/*
 * Tear down and free a device.  For configured devices this first
 * undoes target_configure_device() (backend destroy, idr removal,
 * HBA accounting) before releasing ALUA/PR/VPD state and the queues.
 */
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

/*
 * Create the internal "virt_lun0" ramdisk device on a private rd_mcp
 * HBA; it backs LUN 0 for demo-mode TPGs (see g_lun0_dev).  Returns 0
 * or a negative errno with all partial setup undone.
 */
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

/* Release the virtual LUN 0 device and HBA created at module init. */
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			/* ALLOCATION LENGTH lives in bytes 7-8 of the CDB */
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			/* PARAMETER LIST LENGTH lives in bytes 5-8 of the CDB */
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		/* Service action in bytes 8-9 selects the real operation. */
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	/* Everything else goes straight to the backend's passthrough handler. */
	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);