11a59d1b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2c66ac9dbSNicholas Bellinger /******************************************************************************* 3c66ac9dbSNicholas Bellinger * Filename: target_core_device.c (based on iscsi_target_device.c) 4c66ac9dbSNicholas Bellinger * 5e3d6f909SAndy Grover * This file contains the TCM Virtual Device and Disk Transport 6c66ac9dbSNicholas Bellinger * agnostic related functions. 7c66ac9dbSNicholas Bellinger * 84c76251eSNicholas Bellinger * (c) Copyright 2003-2013 Datera, Inc. 9c66ac9dbSNicholas Bellinger * 10c66ac9dbSNicholas Bellinger * Nicholas A. Bellinger <nab@kernel.org> 11c66ac9dbSNicholas Bellinger * 12c66ac9dbSNicholas Bellinger ******************************************************************************/ 13c66ac9dbSNicholas Bellinger 14c66ac9dbSNicholas Bellinger #include <linux/net.h> 15c66ac9dbSNicholas Bellinger #include <linux/string.h> 16c66ac9dbSNicholas Bellinger #include <linux/delay.h> 17c66ac9dbSNicholas Bellinger #include <linux/timer.h> 18c66ac9dbSNicholas Bellinger #include <linux/slab.h> 19c66ac9dbSNicholas Bellinger #include <linux/spinlock.h> 20c66ac9dbSNicholas Bellinger #include <linux/kthread.h> 21c66ac9dbSNicholas Bellinger #include <linux/in.h> 22c53181afSPaul Gortmaker #include <linux/export.h> 238dcf07beSBart Van Assche #include <linux/t10-pi.h> 247bfea53bSAndy Grover #include <asm/unaligned.h> 25c66ac9dbSNicholas Bellinger #include <net/sock.h> 26c66ac9dbSNicholas Bellinger #include <net/tcp.h> 27ba929992SBart Van Assche #include <scsi/scsi_common.h> 28ba929992SBart Van Assche #include <scsi/scsi_proto.h> 29c66ac9dbSNicholas Bellinger 30c66ac9dbSNicholas Bellinger #include <target/target_core_base.h> 31c4795fb2SChristoph Hellwig #include <target/target_core_backend.h> 32c4795fb2SChristoph Hellwig #include <target/target_core_fabric.h> 33c66ac9dbSNicholas Bellinger 34e26d99aeSChristoph Hellwig #include "target_core_internal.h" 35c66ac9dbSNicholas Bellinger 
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

/*
 * Resolve se_cmd->orig_fe_lun to a mapped LUN for the command's session,
 * take a live percpu se_lun->lun_ref, and account per-deve / per-device
 * command and byte counters.
 *
 * If no MappedLUN exists for the initiator and orig_fe_lun == 0, falls back
 * to the TPG's virtual LUN 0 (forced read-only) so REPORT LUNS et al still
 * work.
 *
 * Returns TCM_NO_SENSE on success, TCM_NON_EXISTENT_LUN or
 * TCM_WRITE_PROTECTED otherwise.  On success se_cmd->se_lun, se_cmd->se_dev
 * and lun_ref_active are set; the lun_ref is dropped later by command
 * completion.
 */
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		/* Reject writes to a LUN the ACL mapped read-only. */
		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		/*
		 * Only publish the lun on se_cmd after a live reference has
		 * been obtained; a dying lun is treated as non-existent.
		 */
		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
	/*
	 * RCU reference protected by percpu se_lun->lun_ref taken above that
	 * must drop to zero (including initial reference) before this se_lun
	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
	 * target_core_fabric_configfs.c:target_fabric_port_release
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

/*
 * Resolve the LUN for a task management request (TMR), take a live
 * se_lun->lun_ref, and link the TMR onto the device's dev_tmr_list.
 *
 * Returns 0 on success or -ENODEV when no mapped LUN exists (or its
 * lun_ref could not be obtained).
 */
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		/* Same ordering as transport_lookup_cmd_lun(): take the
		 * lun_ref before publishing the lun on se_cmd. */
		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	/* Queue the TMR on the device under its se_tmr_lock. */
	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * Return true if the command's orig_fe_lun is mapped read-only for this
 * session's node ACL; false if writable or not mapped at all.
 */
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 *
 * Walks the node ACL's lun_entry_hlist under rcu_read_lock and returns the
 * se_dev_entry whose LUN's TPG has the given relative target port id, or
 * NULL when none matches.  The caller owns the pr_kref taken here and must
 * drop it with target_pr_kref_release.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		/* Pin the entry for the PR code before leaving RCU. */
		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

/*
 * Tear down every se_dev_entry mapped for @nacl in @tpg.  Serialized
 * against concurrent mapping changes by nacl->lun_entry_mutex.
 */
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Update the read-only flag of the se_dev_entry for @mapped_lun under
 * nacl->lun_entry_mutex.  Silently a no-op when no such mapping exists.
 */
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}
/*
 * Look up the se_dev_entry for @mapped_lun in the node ACL's RCU hlist.
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

/*
 * kref release callback for deve->pr_kref: wakes whoever is blocked in
 * wait_for_completion(&deve->pr_comp) waiting for PR users to finish.
 */
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
			pr_kref);
	complete(&deve->pr_comp);
}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {

		/* Nested: se_port_lock -> lun_deve_lock. */
		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
			core_scsi3_ua_allocate(se_deve, asc, ascq);
		spin_unlock(&lun->lun_deve_lock);
	}
	spin_unlock(&dev->se_port_lock);
}

/*
 * Post REPORTED LUNS DATA HAS CHANGED unit attentions on every mapped LUN
 * of @nacl, optionally skipping the freshly added entry @new.
 */
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

/*
 * Create (or replace) the se_dev_entry mapping @mapped_lun -> @lun for
 * @nacl.  An existing dynamic (demo-mode) entry for the same lun is
 * converted to the explicit ACL by RCU-swapping in a new entry; an
 * existing entry for a *different* lun, or one that already has an
 * explicit se_lun_acl, fails with -EINVAL.
 *
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		/* Swap the dynamic entry for the new explicit one via RCU. */
		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		/* Wait for any in-flight PR users of the old entry. */
		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

/*
 * Remove the se_dev_entry @orig mapping for @nacl: unlink from the lun's
 * deve list and the ACL's RCU hlist, release UAs and PR state, wait for
 * in-flight PR users, then free via kfree_rcu.
 *
 * Caller must hold nacl->lun_entry_mutex (asserted below).
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * lun->lun_deve_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->lun_link from
	 * lun->lun_deve_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/* core_clear_lun_from_tpg():
 *
 * Disable every node ACL mapping in @tpg that points at @lun.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	/* acl_node_mutex -> lun_entry_mutex nesting while scanning ACLs. */
	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

/*
 * Assign @lun a RELATIVE TARGET PORT IDENTIFIER that is non-zero and
 * unique among all LUNs exported from @dev, handling 16-bit counter wrap.
 * Returns 0 on success or -ENOSPC when all 0xffff identifiers are in use.
 */
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

/*
 * Free every cached T10 VPD descriptor attached to @dev's t10_wwn,
 * under t10_vpd_lock.
 */
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

/*
 * Round @max_sectors down to a PAGE_SIZE-aligned multiple of @block_size
 * (minimum alignment 1 sector) and log when the value was reduced.
 */
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

/*
 * Activate @lun for @dev inside @tpg via core_tpg_add_lun(), and when the
 * fabric runs in demo mode, map the new LUN into every dynamically
 * generated node ACL.  Returns 0 on success or the core_tpg_add_lun()
 * error code.
 */
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/* core_dev_del_lun():
 *
 * Deactivate @lun in @tpg; thin logging wrapper around
 * core_tpg_remove_lun().
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

/*
 * Allocate and initialize a se_lun_acl binding @nacl to @mapped_lun.
 * On failure returns NULL and stores -EOVERFLOW (initiator name too long)
 * or -ENOMEM in *@ret.
 */
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
608fcf29481SNicholas Bellinger struct se_node_acl *nacl, 609f2d30680SHannes Reinecke u64 mapped_lun, 610c66ac9dbSNicholas Bellinger int *ret) 611c66ac9dbSNicholas Bellinger { 612c66ac9dbSNicholas Bellinger struct se_lun_acl *lacl; 613c66ac9dbSNicholas Bellinger 614fcf29481SNicholas Bellinger if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { 6156708bb27SAndy Grover pr_err("%s InitiatorName exceeds maximum size.\n", 61630c7ca93SDavid Disseldorp tpg->se_tpg_tfo->fabric_name); 617c66ac9dbSNicholas Bellinger *ret = -EOVERFLOW; 618c66ac9dbSNicholas Bellinger return NULL; 619c66ac9dbSNicholas Bellinger } 620c66ac9dbSNicholas Bellinger lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 6216708bb27SAndy Grover if (!lacl) { 6226708bb27SAndy Grover pr_err("Unable to allocate memory for struct se_lun_acl.\n"); 623c66ac9dbSNicholas Bellinger *ret = -ENOMEM; 624c66ac9dbSNicholas Bellinger return NULL; 625c66ac9dbSNicholas Bellinger } 626c66ac9dbSNicholas Bellinger 627c66ac9dbSNicholas Bellinger lacl->mapped_lun = mapped_lun; 628c66ac9dbSNicholas Bellinger lacl->se_lun_nacl = nacl; 629c66ac9dbSNicholas Bellinger 630c66ac9dbSNicholas Bellinger return lacl; 631c66ac9dbSNicholas Bellinger } 632c66ac9dbSNicholas Bellinger 633c66ac9dbSNicholas Bellinger int core_dev_add_initiator_node_lun_acl( 634c66ac9dbSNicholas Bellinger struct se_portal_group *tpg, 635c66ac9dbSNicholas Bellinger struct se_lun_acl *lacl, 6366bb82612SNicholas Bellinger struct se_lun *lun, 63703a68b44SAndy Grover bool lun_access_ro) 638c66ac9dbSNicholas Bellinger { 6396bb82612SNicholas Bellinger struct se_node_acl *nacl = lacl->se_lun_nacl; 6404cc987eaSNicholas Bellinger /* 6414cc987eaSNicholas Bellinger * rcu_dereference_raw protected by se_lun->lun_group symlink 6424cc987eaSNicholas Bellinger * reference to se_device->dev_group. 
6434cc987eaSNicholas Bellinger */ 6444cc987eaSNicholas Bellinger struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); 645c66ac9dbSNicholas Bellinger 6466708bb27SAndy Grover if (!nacl) 647c66ac9dbSNicholas Bellinger return -EINVAL; 648c66ac9dbSNicholas Bellinger 64903a68b44SAndy Grover if (lun->lun_access_ro) 65003a68b44SAndy Grover lun_access_ro = true; 651c66ac9dbSNicholas Bellinger 652c66ac9dbSNicholas Bellinger lacl->se_lun = lun; 653c66ac9dbSNicholas Bellinger 654e80ac6c4SAndy Grover if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, 65503a68b44SAndy Grover lun_access_ro, nacl, tpg) < 0) 656c66ac9dbSNicholas Bellinger return -EINVAL; 657c66ac9dbSNicholas Bellinger 658f2d30680SHannes Reinecke pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for " 65930c7ca93SDavid Disseldorp " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name, 6606bb82612SNicholas Bellinger tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun, 66103a68b44SAndy Grover lun_access_ro ? "RO" : "RW", 662b6a54b8dSChris Zankel nacl->initiatorname); 663c66ac9dbSNicholas Bellinger /* 664c66ac9dbSNicholas Bellinger * Check to see if there are any existing persistent reservation APTPL 665c66ac9dbSNicholas Bellinger * pre-registrations that need to be enabled for this LUN ACL.. 
666c66ac9dbSNicholas Bellinger */ 6674cc987eaSNicholas Bellinger core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl, 668e2480563SNicholas Bellinger lacl->mapped_lun); 669c66ac9dbSNicholas Bellinger return 0; 670c66ac9dbSNicholas Bellinger } 671c66ac9dbSNicholas Bellinger 672c66ac9dbSNicholas Bellinger int core_dev_del_initiator_node_lun_acl( 673c66ac9dbSNicholas Bellinger struct se_lun *lun, 674c66ac9dbSNicholas Bellinger struct se_lun_acl *lacl) 675c66ac9dbSNicholas Bellinger { 676adf653f9SChristoph Hellwig struct se_portal_group *tpg = lun->lun_tpg; 677c66ac9dbSNicholas Bellinger struct se_node_acl *nacl; 67829a05deeSNicholas Bellinger struct se_dev_entry *deve; 679c66ac9dbSNicholas Bellinger 680c66ac9dbSNicholas Bellinger nacl = lacl->se_lun_nacl; 6816708bb27SAndy Grover if (!nacl) 682c66ac9dbSNicholas Bellinger return -EINVAL; 683c66ac9dbSNicholas Bellinger 68429a05deeSNicholas Bellinger mutex_lock(&nacl->lun_entry_mutex); 68529a05deeSNicholas Bellinger deve = target_nacl_find_deve(nacl, lacl->mapped_lun); 68629a05deeSNicholas Bellinger if (deve) 68729a05deeSNicholas Bellinger core_disable_device_list_for_node(lun, deve, nacl, tpg); 68829a05deeSNicholas Bellinger mutex_unlock(&nacl->lun_entry_mutex); 689c66ac9dbSNicholas Bellinger 690f2d30680SHannes Reinecke pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for" 691f2d30680SHannes Reinecke " InitiatorNode: %s Mapped LUN: %llu\n", 69230c7ca93SDavid Disseldorp tpg->se_tpg_tfo->fabric_name, 693e3d6f909SAndy Grover tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 694b6a54b8dSChris Zankel nacl->initiatorname, lacl->mapped_lun); 695c66ac9dbSNicholas Bellinger 696c66ac9dbSNicholas Bellinger return 0; 697c66ac9dbSNicholas Bellinger } 698c66ac9dbSNicholas Bellinger 699c66ac9dbSNicholas Bellinger void core_dev_free_initiator_node_lun_acl( 700c66ac9dbSNicholas Bellinger struct se_portal_group *tpg, 701c66ac9dbSNicholas Bellinger struct se_lun_acl *lacl) 702c66ac9dbSNicholas Bellinger { 7036708bb27SAndy Grover 
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 70430c7ca93SDavid Disseldorp " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name, 705e3d6f909SAndy Grover tpg->se_tpg_tfo->tpg_get_tag(tpg), 70630c7ca93SDavid Disseldorp tpg->se_tpg_tfo->fabric_name, 707b6a54b8dSChris Zankel lacl->se_lun_nacl->initiatorname, lacl->mapped_lun); 708c66ac9dbSNicholas Bellinger 709c66ac9dbSNicholas Bellinger kfree(lacl); 710c66ac9dbSNicholas Bellinger } 711c66ac9dbSNicholas Bellinger 7120fd97ccfSChristoph Hellwig static void scsi_dump_inquiry(struct se_device *dev) 7130fd97ccfSChristoph Hellwig { 7140fd97ccfSChristoph Hellwig struct t10_wwn *wwn = &dev->t10_wwn; 715b2da4abfSDavid Disseldorp int device_type = dev->transport->get_device_type(dev); 716b2da4abfSDavid Disseldorp 7170fd97ccfSChristoph Hellwig /* 7180fd97ccfSChristoph Hellwig * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 7190fd97ccfSChristoph Hellwig */ 720b2da4abfSDavid Disseldorp pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n", 721b2da4abfSDavid Disseldorp wwn->vendor); 722b2da4abfSDavid Disseldorp pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n", 723b2da4abfSDavid Disseldorp wwn->model); 724b2da4abfSDavid Disseldorp pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n", 725b2da4abfSDavid Disseldorp wwn->revision); 7260fd97ccfSChristoph Hellwig pr_debug(" Type: %s ", scsi_device_type(device_type)); 7270fd97ccfSChristoph Hellwig } 7280fd97ccfSChristoph Hellwig 7290fd97ccfSChristoph Hellwig struct se_device *target_alloc_device(struct se_hba *hba, const char *name) 7300fd97ccfSChristoph Hellwig { 7310fd97ccfSChristoph Hellwig struct se_device *dev; 7324863e525SNicholas Bellinger struct se_lun *xcopy_lun; 7331526d9f1SMike Christie int i; 7340fd97ccfSChristoph Hellwig 7350a06d430SChristoph Hellwig dev = hba->backend->ops->alloc_device(hba, name); 7360fd97ccfSChristoph Hellwig if (!dev) 7370fd97ccfSChristoph Hellwig return NULL; 7380fd97ccfSChristoph Hellwig 
7391526d9f1SMike Christie dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); 7401526d9f1SMike Christie if (!dev->queues) { 7411526d9f1SMike Christie dev->transport->free_device(dev); 7421526d9f1SMike Christie return NULL; 7431526d9f1SMike Christie } 7441526d9f1SMike Christie 7451526d9f1SMike Christie dev->queue_cnt = nr_cpu_ids; 7461526d9f1SMike Christie for (i = 0; i < dev->queue_cnt; i++) { 747eb44ce8cSMike Christie struct se_device_queue *q; 748eb44ce8cSMike Christie 749eb44ce8cSMike Christie q = &dev->queues[i]; 750eb44ce8cSMike Christie INIT_LIST_HEAD(&q->state_list); 751eb44ce8cSMike Christie spin_lock_init(&q->lock); 752eb44ce8cSMike Christie 753eb44ce8cSMike Christie init_llist_head(&q->sq.cmd_list); 754eb44ce8cSMike Christie INIT_WORK(&q->sq.work, target_queued_submit_work); 7551526d9f1SMike Christie } 7561526d9f1SMike Christie 7570fd97ccfSChristoph Hellwig dev->se_hba = hba; 7580a06d430SChristoph Hellwig dev->transport = hba->backend->ops; 75969088a04SBodo Stroesser dev->transport_flags = dev->transport->transport_flags_default; 760fe052a18SSagi Grimberg dev->prot_length = sizeof(struct t10_pi_tuple); 7614cc987eaSNicholas Bellinger dev->hba_index = hba->hba_index; 7620fd97ccfSChristoph Hellwig 7630fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->dev_sep_list); 7640fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->dev_tmr_list); 7650fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->delayed_cmd_list); 7660fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->qf_cmd_list); 7670fd97ccfSChristoph Hellwig spin_lock_init(&dev->delayed_cmd_lock); 7680fd97ccfSChristoph Hellwig spin_lock_init(&dev->dev_reservation_lock); 7690fd97ccfSChristoph Hellwig spin_lock_init(&dev->se_port_lock); 7700fd97ccfSChristoph Hellwig spin_lock_init(&dev->se_tmr_lock); 7710fd97ccfSChristoph Hellwig spin_lock_init(&dev->qf_cmd_lock); 77268ff9b9bSNicholas Bellinger sema_init(&dev->caw_sem, 1); 7730fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 
7740fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_wwn.t10_vpd_lock); 7750fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_pr.registration_list); 7760fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); 7770fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_pr.registration_lock); 7780fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 7790fd97ccfSChristoph Hellwig INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 7800fd97ccfSChristoph Hellwig spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 781c66094bfSHannes Reinecke INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); 782c66094bfSHannes Reinecke spin_lock_init(&dev->t10_alua.lba_map_lock); 7830fd97ccfSChristoph Hellwig 784ed1227e0SMike Christie INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); 785ed1227e0SMike Christie 7860fd97ccfSChristoph Hellwig dev->t10_wwn.t10_dev = dev; 7872469f1e0SSergey Samoylenko /* 7882469f1e0SSergey Samoylenko * Use OpenFabrics IEEE Company ID: 00 14 05 7892469f1e0SSergey Samoylenko */ 7902469f1e0SSergey Samoylenko dev->t10_wwn.company_id = 0x001405; 7912469f1e0SSergey Samoylenko 7920fd97ccfSChristoph Hellwig dev->t10_alua.t10_dev = dev; 7930fd97ccfSChristoph Hellwig 7940fd97ccfSChristoph Hellwig dev->dev_attrib.da_dev = dev; 795adfa9570STregaron Bayly dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; 796814e5b45SChristoph Hellwig dev->dev_attrib.emulate_dpo = 1; 797814e5b45SChristoph Hellwig dev->dev_attrib.emulate_fua_write = 1; 798814e5b45SChristoph Hellwig dev->dev_attrib.emulate_fua_read = 1; 7990fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 8001bf630fdSDavid Disseldorp dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR; 8010fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; 8020fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; 8030fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 8040123a9ecSNicholas 
Bellinger dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 805d397a445SNicholas Bellinger dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 806b49d6f78SDavid Disseldorp dev->dev_attrib.emulate_pr = DA_EMULATE_PR; 807bd217b8cSDmitry Bogdanov dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC; 8082ed22c9cSNicholas Bellinger dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 8090fd97ccfSChristoph Hellwig dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 81092404e60SNicholas Bellinger dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; 8110fd97ccfSChristoph Hellwig dev->dev_attrib.is_nonrot = DA_IS_NONROT; 8120fd97ccfSChristoph Hellwig dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 8130fd97ccfSChristoph Hellwig dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 8140fd97ccfSChristoph Hellwig dev->dev_attrib.max_unmap_block_desc_count = 8150fd97ccfSChristoph Hellwig DA_MAX_UNMAP_BLOCK_DESC_COUNT; 8160fd97ccfSChristoph Hellwig dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 8170fd97ccfSChristoph Hellwig dev->dev_attrib.unmap_granularity_alignment = 8180fd97ccfSChristoph Hellwig DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 819e6f41633SJamie Pocas dev->dev_attrib.unmap_zeroes_data = 820e6f41633SJamie Pocas DA_UNMAP_ZEROES_DATA_DEFAULT; 821773cbaf7SNicholas Bellinger dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 8220fd97ccfSChristoph Hellwig 8234863e525SNicholas Bellinger xcopy_lun = &dev->xcopy_lun; 8244cc987eaSNicholas Bellinger rcu_assign_pointer(xcopy_lun->lun_se_dev, dev); 825bd4e2d29SNicholas Bellinger init_completion(&xcopy_lun->lun_shutdown_comp); 826adf653f9SChristoph Hellwig INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); 827adf653f9SChristoph Hellwig INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); 828adf653f9SChristoph Hellwig mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); 829adf653f9SChristoph Hellwig xcopy_lun->lun_tpg = &xcopy_pt_tpg; 8304863e525SNicholas Bellinger 8313beeabd5SDavid Disseldorp /* Preload the default INQUIRY 
const values */ 8323beeabd5SDavid Disseldorp strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); 8333beeabd5SDavid Disseldorp strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, 8343beeabd5SDavid Disseldorp sizeof(dev->t10_wwn.model)); 8353beeabd5SDavid Disseldorp strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, 8363beeabd5SDavid Disseldorp sizeof(dev->t10_wwn.revision)); 8373beeabd5SDavid Disseldorp 8380fd97ccfSChristoph Hellwig return dev; 8390fd97ccfSChristoph Hellwig } 8400fd97ccfSChristoph Hellwig 8418a9ebe71SMike Christie /* 842cf0fbf89SChristoph Hellwig * Check if the underlying struct block_device supports discard and if yes 843cf0fbf89SChristoph Hellwig * configure the UNMAP parameters. 8448a9ebe71SMike Christie */ 8458a9ebe71SMike Christie bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, 846817e8b51SChristoph Hellwig struct block_device *bdev) 8478a9ebe71SMike Christie { 848817e8b51SChristoph Hellwig int block_size = bdev_logical_block_size(bdev); 849ea263c7fSMike Christie 85070200574SChristoph Hellwig if (!bdev_max_discard_sectors(bdev)) 8518a9ebe71SMike Christie return false; 8528a9ebe71SMike Christie 853ea263c7fSMike Christie attrib->max_unmap_lba_count = 854cf0fbf89SChristoph Hellwig bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9); 8558a9ebe71SMike Christie /* 8568a9ebe71SMike Christie * Currently hardcoded to 1 in Linux/SCSI code.. 
8578a9ebe71SMike Christie */ 8588a9ebe71SMike Christie attrib->max_unmap_block_desc_count = 1; 8597b47ef52SChristoph Hellwig attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size; 860968786b9SChristoph Hellwig attrib->unmap_granularity_alignment = 861968786b9SChristoph Hellwig bdev_discard_alignment(bdev) / block_size; 8628a9ebe71SMike Christie return true; 8638a9ebe71SMike Christie } 8648a9ebe71SMike Christie EXPORT_SYMBOL(target_configure_unmap_from_queue); 8658a9ebe71SMike Christie 8668a9ebe71SMike Christie /* 8678a9ebe71SMike Christie * Convert from blocksize advertised to the initiator to the 512 byte 8688a9ebe71SMike Christie * units unconditionally used by the Linux block layer. 8698a9ebe71SMike Christie */ 8708a9ebe71SMike Christie sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) 8718a9ebe71SMike Christie { 8728a9ebe71SMike Christie switch (dev->dev_attrib.block_size) { 8738a9ebe71SMike Christie case 4096: 8748a9ebe71SMike Christie return lb << 3; 8758a9ebe71SMike Christie case 2048: 8768a9ebe71SMike Christie return lb << 2; 8778a9ebe71SMike Christie case 1024: 8788a9ebe71SMike Christie return lb << 1; 8798a9ebe71SMike Christie default: 8808a9ebe71SMike Christie return lb; 8818a9ebe71SMike Christie } 8828a9ebe71SMike Christie } 8838a9ebe71SMike Christie EXPORT_SYMBOL(target_to_linux_sector); 8848a9ebe71SMike Christie 885b1943fd4SMike Christie struct devices_idr_iter { 88636d4cb46SBart Van Assche struct config_item *prev_item; 887b1943fd4SMike Christie int (*fn)(struct se_device *dev, void *data); 888b1943fd4SMike Christie void *data; 889b1943fd4SMike Christie }; 890b1943fd4SMike Christie 891b1943fd4SMike Christie static int target_devices_idr_iter(int id, void *p, void *data) 89236d4cb46SBart Van Assche __must_hold(&device_mutex) 893b1943fd4SMike Christie { 894b1943fd4SMike Christie struct devices_idr_iter *iter = data; 895b1943fd4SMike Christie struct se_device *dev = p; 89636d4cb46SBart Van Assche int ret; 
89736d4cb46SBart Van Assche 89836d4cb46SBart Van Assche config_item_put(iter->prev_item); 89936d4cb46SBart Van Assche iter->prev_item = NULL; 900b1943fd4SMike Christie 901b1943fd4SMike Christie /* 902b1943fd4SMike Christie * We add the device early to the idr, so it can be used 903b1943fd4SMike Christie * by backend modules during configuration. We do not want 904b1943fd4SMike Christie * to allow other callers to access partially setup devices, 905b1943fd4SMike Christie * so we skip them here. 906b1943fd4SMike Christie */ 907cb0f32e1SMike Christie if (!target_dev_configured(dev)) 908b1943fd4SMike Christie return 0; 909b1943fd4SMike Christie 91036d4cb46SBart Van Assche iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); 91136d4cb46SBart Van Assche if (!iter->prev_item) 91236d4cb46SBart Van Assche return 0; 91336d4cb46SBart Van Assche mutex_unlock(&device_mutex); 91436d4cb46SBart Van Assche 91536d4cb46SBart Van Assche ret = iter->fn(dev, iter->data); 91636d4cb46SBart Van Assche 91736d4cb46SBart Van Assche mutex_lock(&device_mutex); 91836d4cb46SBart Van Assche return ret; 919b1943fd4SMike Christie } 920b1943fd4SMike Christie 921b1943fd4SMike Christie /** 922b1943fd4SMike Christie * target_for_each_device - iterate over configured devices 923b1943fd4SMike Christie * @fn: iterator function 924b1943fd4SMike Christie * @data: pointer to data that will be passed to fn 925b1943fd4SMike Christie * 926b1943fd4SMike Christie * fn must return 0 to continue looping over devices. non-zero will break 927b1943fd4SMike Christie * from the loop and return that value to the caller. 
928b1943fd4SMike Christie */ 929b1943fd4SMike Christie int target_for_each_device(int (*fn)(struct se_device *dev, void *data), 930b1943fd4SMike Christie void *data) 931b1943fd4SMike Christie { 93236d4cb46SBart Van Assche struct devices_idr_iter iter = { .fn = fn, .data = data }; 933b1943fd4SMike Christie int ret; 934b1943fd4SMike Christie 935be50f538SMike Christie mutex_lock(&device_mutex); 936b1943fd4SMike Christie ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); 937be50f538SMike Christie mutex_unlock(&device_mutex); 93836d4cb46SBart Van Assche config_item_put(iter.prev_item); 939b1943fd4SMike Christie return ret; 940b1943fd4SMike Christie } 941b1943fd4SMike Christie 9420fd97ccfSChristoph Hellwig int target_configure_device(struct se_device *dev) 9430fd97ccfSChristoph Hellwig { 9440fd97ccfSChristoph Hellwig struct se_hba *hba = dev->se_hba; 9450a5eee64SMike Christie int ret, id; 9460fd97ccfSChristoph Hellwig 947cb0f32e1SMike Christie if (target_dev_configured(dev)) { 9480fd97ccfSChristoph Hellwig pr_err("se_dev->se_dev_ptr already set for storage" 9490fd97ccfSChristoph Hellwig " object\n"); 9500fd97ccfSChristoph Hellwig return -EEXIST; 9510fd97ccfSChristoph Hellwig } 9520fd97ccfSChristoph Hellwig 9530a5eee64SMike Christie /* 9540a5eee64SMike Christie * Add early so modules like tcmu can use during its 9550a5eee64SMike Christie * configuration. 9560a5eee64SMike Christie */ 957be50f538SMike Christie mutex_lock(&device_mutex); 9580a5eee64SMike Christie /* 9590a5eee64SMike Christie * Use cyclic to try and avoid collisions with devices 9600a5eee64SMike Christie * that were recently removed. 
9610a5eee64SMike Christie */ 9620a5eee64SMike Christie id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL); 963be50f538SMike Christie mutex_unlock(&device_mutex); 9640a5eee64SMike Christie if (id < 0) { 9650a5eee64SMike Christie ret = -ENOMEM; 9660a5eee64SMike Christie goto out; 9670a5eee64SMike Christie } 9680a5eee64SMike Christie dev->dev_index = id; 9690a5eee64SMike Christie 9700fd97ccfSChristoph Hellwig ret = dev->transport->configure_device(dev); 9710fd97ccfSChristoph Hellwig if (ret) 9720a5eee64SMike Christie goto out_free_index; 9736b206a5aSMike Christie 9746b206a5aSMike Christie if (dev->transport->configure_unmap && 9756b206a5aSMike Christie dev->transport->configure_unmap(dev)) { 9766b206a5aSMike Christie pr_debug("Discard support available, but disabled by default.\n"); 9776b206a5aSMike Christie } 9786b206a5aSMike Christie 9790fd97ccfSChristoph Hellwig /* 9800fd97ccfSChristoph Hellwig * XXX: there is not much point to have two different values here.. 9810fd97ccfSChristoph Hellwig */ 9820fd97ccfSChristoph Hellwig dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; 9830fd97ccfSChristoph Hellwig dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; 9840fd97ccfSChristoph Hellwig 9850fd97ccfSChristoph Hellwig /* 9860fd97ccfSChristoph Hellwig * Align max_hw_sectors down to PAGE_SIZE I/O transfers 9870fd97ccfSChristoph Hellwig */ 9880fd97ccfSChristoph Hellwig dev->dev_attrib.hw_max_sectors = 9890fd97ccfSChristoph Hellwig se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, 9900fd97ccfSChristoph Hellwig dev->dev_attrib.hw_block_size); 991046ba642SNicholas Bellinger dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; 9920fd97ccfSChristoph Hellwig 9930fd97ccfSChristoph Hellwig dev->creation_time = get_jiffies_64(); 9940fd97ccfSChristoph Hellwig 9950fd97ccfSChristoph Hellwig ret = core_setup_alua(dev); 9960fd97ccfSChristoph Hellwig if (ret) 997c82b59e7Stangwenji goto out_destroy_device; 9980fd97ccfSChristoph Hellwig 
9990fd97ccfSChristoph Hellwig /* 10000fd97ccfSChristoph Hellwig * Setup work_queue for QUEUE_FULL 10010fd97ccfSChristoph Hellwig */ 10020fd97ccfSChristoph Hellwig INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 10030fd97ccfSChristoph Hellwig 10040fd97ccfSChristoph Hellwig scsi_dump_inquiry(dev); 10050fd97ccfSChristoph Hellwig 10060fd97ccfSChristoph Hellwig spin_lock(&hba->device_lock); 10070fd97ccfSChristoph Hellwig hba->dev_count++; 10080fd97ccfSChristoph Hellwig spin_unlock(&hba->device_lock); 1009d9ea32bfSNicholas Bellinger 10105f7da044SNicholas Bellinger dev->dev_flags |= DF_CONFIGURED; 10115f7da044SNicholas Bellinger 10120fd97ccfSChristoph Hellwig return 0; 10130fd97ccfSChristoph Hellwig 1014c82b59e7Stangwenji out_destroy_device: 1015c82b59e7Stangwenji dev->transport->destroy_device(dev); 10160a5eee64SMike Christie out_free_index: 1017be50f538SMike Christie mutex_lock(&device_mutex); 10180a5eee64SMike Christie idr_remove(&devices_idr, dev->dev_index); 1019be50f538SMike Christie mutex_unlock(&device_mutex); 10200fd97ccfSChristoph Hellwig out: 10210fd97ccfSChristoph Hellwig se_release_vpd_for_dev(dev); 10220fd97ccfSChristoph Hellwig return ret; 10230fd97ccfSChristoph Hellwig } 10240fd97ccfSChristoph Hellwig 10250fd97ccfSChristoph Hellwig void target_free_device(struct se_device *dev) 10260fd97ccfSChristoph Hellwig { 10270fd97ccfSChristoph Hellwig struct se_hba *hba = dev->se_hba; 10280fd97ccfSChristoph Hellwig 10290fd97ccfSChristoph Hellwig WARN_ON(!list_empty(&dev->dev_sep_list)); 10300fd97ccfSChristoph Hellwig 1031cb0f32e1SMike Christie if (target_dev_configured(dev)) { 103292634706SMike Christie dev->transport->destroy_device(dev); 103392634706SMike Christie 1034be50f538SMike Christie mutex_lock(&device_mutex); 10350a5eee64SMike Christie idr_remove(&devices_idr, dev->dev_index); 1036be50f538SMike Christie mutex_unlock(&device_mutex); 1037d9ea32bfSNicholas Bellinger 10380fd97ccfSChristoph Hellwig spin_lock(&hba->device_lock); 10390fd97ccfSChristoph Hellwig 
hba->dev_count--; 10400fd97ccfSChristoph Hellwig spin_unlock(&hba->device_lock); 10410fd97ccfSChristoph Hellwig } 10420fd97ccfSChristoph Hellwig 10430fd97ccfSChristoph Hellwig core_alua_free_lu_gp_mem(dev); 1044229d4f11SHannes Reinecke core_alua_set_lba_map(dev, NULL, 0, 0); 10450fd97ccfSChristoph Hellwig core_scsi3_free_all_registrations(dev); 10460fd97ccfSChristoph Hellwig se_release_vpd_for_dev(dev); 10470fd97ccfSChristoph Hellwig 10482ed22c9cSNicholas Bellinger if (dev->transport->free_prot) 10492ed22c9cSNicholas Bellinger dev->transport->free_prot(dev); 10502ed22c9cSNicholas Bellinger 10511526d9f1SMike Christie kfree(dev->queues); 10520fd97ccfSChristoph Hellwig dev->transport->free_device(dev); 10530fd97ccfSChristoph Hellwig } 10540fd97ccfSChristoph Hellwig 1055c66ac9dbSNicholas Bellinger int core_dev_setup_virtual_lun0(void) 1056c66ac9dbSNicholas Bellinger { 1057c66ac9dbSNicholas Bellinger struct se_hba *hba; 1058c66ac9dbSNicholas Bellinger struct se_device *dev; 10591b5ad814SKonstantin Shelekhin char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1"; 1060c66ac9dbSNicholas Bellinger int ret; 1061c66ac9dbSNicholas Bellinger 10626708bb27SAndy Grover hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); 1063c66ac9dbSNicholas Bellinger if (IS_ERR(hba)) 1064c66ac9dbSNicholas Bellinger return PTR_ERR(hba); 1065c66ac9dbSNicholas Bellinger 10660fd97ccfSChristoph Hellwig dev = target_alloc_device(hba, "virt_lun0"); 10670fd97ccfSChristoph Hellwig if (!dev) { 1068c66ac9dbSNicholas Bellinger ret = -ENOMEM; 10690fd97ccfSChristoph Hellwig goto out_free_hba; 1070c66ac9dbSNicholas Bellinger } 1071c66ac9dbSNicholas Bellinger 10720a06d430SChristoph Hellwig hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf)); 1073c66ac9dbSNicholas Bellinger 10740fd97ccfSChristoph Hellwig ret = target_configure_device(dev); 10750fd97ccfSChristoph Hellwig if (ret) 10760fd97ccfSChristoph Hellwig goto out_free_se_dev; 10770fd97ccfSChristoph Hellwig 10780fd97ccfSChristoph Hellwig 
lun0_hba = hba; 1079e3d6f909SAndy Grover g_lun0_dev = dev; 1080c66ac9dbSNicholas Bellinger return 0; 10810fd97ccfSChristoph Hellwig 10820fd97ccfSChristoph Hellwig out_free_se_dev: 10830fd97ccfSChristoph Hellwig target_free_device(dev); 10840fd97ccfSChristoph Hellwig out_free_hba: 10850fd97ccfSChristoph Hellwig core_delete_hba(hba); 1086c66ac9dbSNicholas Bellinger return ret; 1087c66ac9dbSNicholas Bellinger } 1088c66ac9dbSNicholas Bellinger 1089c66ac9dbSNicholas Bellinger 1090c66ac9dbSNicholas Bellinger void core_dev_release_virtual_lun0(void) 1091c66ac9dbSNicholas Bellinger { 1092e3d6f909SAndy Grover struct se_hba *hba = lun0_hba; 1093c66ac9dbSNicholas Bellinger 10946708bb27SAndy Grover if (!hba) 1095c66ac9dbSNicholas Bellinger return; 1096c66ac9dbSNicholas Bellinger 1097e3d6f909SAndy Grover if (g_lun0_dev) 10980fd97ccfSChristoph Hellwig target_free_device(g_lun0_dev); 1099c66ac9dbSNicholas Bellinger core_delete_hba(hba); 1100c66ac9dbSNicholas Bellinger } 11017bfea53bSAndy Grover 11027bfea53bSAndy Grover /* 11037bfea53bSAndy Grover * Common CDB parsing for kernel and user passthrough. 11047bfea53bSAndy Grover */ 11057bfea53bSAndy Grover sense_reason_t 11067bfea53bSAndy Grover passthrough_parse_cdb(struct se_cmd *cmd, 11077bfea53bSAndy Grover sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) 11087bfea53bSAndy Grover { 11097bfea53bSAndy Grover unsigned char *cdb = cmd->t_task_cdb; 11104ec5bf0eSBryant G. Ly struct se_device *dev = cmd->se_dev; 11114ec5bf0eSBryant G. Ly unsigned int size; 11127bfea53bSAndy Grover 11137bfea53bSAndy Grover /* 11147bfea53bSAndy Grover * For REPORT LUNS we always need to emulate the response, for everything 11157bfea53bSAndy Grover * else, pass it up. 11167bfea53bSAndy Grover */ 11177bfea53bSAndy Grover if (cdb[0] == REPORT_LUNS) { 11187bfea53bSAndy Grover cmd->execute_cmd = spc_emulate_report_luns; 11197bfea53bSAndy Grover return TCM_NO_SENSE; 11207bfea53bSAndy Grover } 11217bfea53bSAndy Grover 11224ec5bf0eSBryant G. 
Ly /* 1123b49d6f78SDavid Disseldorp * With emulate_pr disabled, all reservation requests should fail, 1124b49d6f78SDavid Disseldorp * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set. 1125b49d6f78SDavid Disseldorp */ 1126b49d6f78SDavid Disseldorp if (!dev->dev_attrib.emulate_pr && 1127b49d6f78SDavid Disseldorp ((cdb[0] == PERSISTENT_RESERVE_IN) || 1128b49d6f78SDavid Disseldorp (cdb[0] == PERSISTENT_RESERVE_OUT) || 1129b49d6f78SDavid Disseldorp (cdb[0] == RELEASE || cdb[0] == RELEASE_10) || 1130b49d6f78SDavid Disseldorp (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) { 1131b49d6f78SDavid Disseldorp return TCM_UNSUPPORTED_SCSI_OPCODE; 1132b49d6f78SDavid Disseldorp } 1133b49d6f78SDavid Disseldorp 1134b49d6f78SDavid Disseldorp /* 11354ec5bf0eSBryant G. Ly * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to 11364ec5bf0eSBryant G. Ly * emulate the response, since tcmu does not have the information 11374ec5bf0eSBryant G. Ly * required to process these commands. 11384ec5bf0eSBryant G. Ly */ 113969088a04SBodo Stroesser if (!(dev->transport_flags & 11404ec5bf0eSBryant G. Ly TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 11414ec5bf0eSBryant G. Ly if (cdb[0] == PERSISTENT_RESERVE_IN) { 11424ec5bf0eSBryant G. Ly cmd->execute_cmd = target_scsi3_emulate_pr_in; 1143a85d667eSBart Van Assche size = get_unaligned_be16(&cdb[7]); 11444ec5bf0eSBryant G. Ly return target_cmd_size_check(cmd, size); 11454ec5bf0eSBryant G. Ly } 11464ec5bf0eSBryant G. Ly if (cdb[0] == PERSISTENT_RESERVE_OUT) { 11474ec5bf0eSBryant G. Ly cmd->execute_cmd = target_scsi3_emulate_pr_out; 1148388fe699STang Wenji size = get_unaligned_be32(&cdb[5]); 11494ec5bf0eSBryant G. Ly return target_cmd_size_check(cmd, size); 11504ec5bf0eSBryant G. Ly } 11514ec5bf0eSBryant G. Ly 11524ec5bf0eSBryant G. Ly if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) { 11534ec5bf0eSBryant G. Ly cmd->execute_cmd = target_scsi2_reservation_release; 11544ec5bf0eSBryant G. 
Ly if (cdb[0] == RELEASE_10) 1155a85d667eSBart Van Assche size = get_unaligned_be16(&cdb[7]); 11564ec5bf0eSBryant G. Ly else 11574ec5bf0eSBryant G. Ly size = cmd->data_length; 11584ec5bf0eSBryant G. Ly return target_cmd_size_check(cmd, size); 11594ec5bf0eSBryant G. Ly } 11604ec5bf0eSBryant G. Ly if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) { 11614ec5bf0eSBryant G. Ly cmd->execute_cmd = target_scsi2_reservation_reserve; 11624ec5bf0eSBryant G. Ly if (cdb[0] == RESERVE_10) 1163a85d667eSBart Van Assche size = get_unaligned_be16(&cdb[7]); 11644ec5bf0eSBryant G. Ly else 11654ec5bf0eSBryant G. Ly size = cmd->data_length; 11664ec5bf0eSBryant G. Ly return target_cmd_size_check(cmd, size); 11674ec5bf0eSBryant G. Ly } 11684ec5bf0eSBryant G. Ly } 11694ec5bf0eSBryant G. Ly 11707bfea53bSAndy Grover /* Set DATA_CDB flag for ops that should have it */ 11717bfea53bSAndy Grover switch (cdb[0]) { 11727bfea53bSAndy Grover case READ_6: 11737bfea53bSAndy Grover case READ_10: 11747bfea53bSAndy Grover case READ_12: 11757bfea53bSAndy Grover case READ_16: 11767bfea53bSAndy Grover case WRITE_6: 11777bfea53bSAndy Grover case WRITE_10: 11787bfea53bSAndy Grover case WRITE_12: 11797bfea53bSAndy Grover case WRITE_16: 11807bfea53bSAndy Grover case WRITE_VERIFY: 11817bfea53bSAndy Grover case WRITE_VERIFY_12: 11823e182db7SBart Van Assche case WRITE_VERIFY_16: 11837bfea53bSAndy Grover case COMPARE_AND_WRITE: 11847bfea53bSAndy Grover case XDWRITEREAD_10: 11857bfea53bSAndy Grover cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 11867bfea53bSAndy Grover break; 11877bfea53bSAndy Grover case VARIABLE_LENGTH_CMD: 11887bfea53bSAndy Grover switch (get_unaligned_be16(&cdb[8])) { 11897bfea53bSAndy Grover case READ_32: 11907bfea53bSAndy Grover case WRITE_32: 1191e5dc9a70SDamien Le Moal case WRITE_VERIFY_32: 11927bfea53bSAndy Grover case XDWRITEREAD_32: 11937bfea53bSAndy Grover cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 11947bfea53bSAndy Grover break; 11957bfea53bSAndy Grover } 11967bfea53bSAndy Grover } 11977bfea53bSAndy 
Grover 11987bfea53bSAndy Grover cmd->execute_cmd = exec_cmd; 11997bfea53bSAndy Grover 12007bfea53bSAndy Grover return TCM_NO_SENSE; 12017bfea53bSAndy Grover } 12027bfea53bSAndy Grover EXPORT_SYMBOL(passthrough_parse_cdb); 1203