/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_FABRIC_H
#define TARGET_CORE_FABRIC_H

#include <linux/configfs.h>
#include <linux/types.h>
#include <target/target_core_base.h>

struct target_core_fabric_ops {
	struct module *module;
	/*
	 * XXX: Special case for iscsi/iSCSI...
	 * If non-null, fabric_alias is used for matching target/$fabric
	 * ConfigFS paths. If null, fabric_name is used for this (see below).
	 */
	const char *fabric_alias;
	/*
	 * fabric_name is used for matching target/$fabric ConfigFS paths
	 * without a fabric_alias (see above). It's also used for the ALUA state
	 * path and is stored on disk with PR state.
	 */
	const char *fabric_name;
	size_t node_acl_size;
	/*
	 * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
	 * Setting this value tells target-core to enforce this limit, and
	 * report as INQUIRY EVPD=b0 MAXIMUM TRANSFER LENGTH.
	 *
	 * target-core will currently reset se_cmd->data_length to this
	 * maximum size, and set UNDERFLOW residual count if length exceeds
	 * this limit.
	 *
	 * XXX: Not all initiator hosts honor this block-limit EVPD
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist entry
	 */
	u32 max_data_sg_nents;
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	/*
	 * Optionally used by fabrics to allow demo-mode login, but not
	 * expose any TPG LUNs, and return 'not connected' in standard
	 * inquiry response
	 */
	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
	/*
	 * Optionally used as a configfs tunable to determine when
	 * target-core should signal the PROTECT=1 feature bit for
	 * backends that don't support T10-PI, so that either fabric
	 * HW offload or target-core emulation performs the associated
	 * WRITE_STRIP and READ_INSERT operations.
	 */
	int (*tpg_check_prot_fabric_only)(struct se_portal_group *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor after command execution has finished.
	 *
	 * Returning 1 will signal a descriptor has been released.
	 * Returning 0 will signal a descriptor has not been released.
	 */
	int (*check_stop_free)(struct se_cmd *);
	void (*release_cmd)(struct se_cmd *);
	void (*close_session)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
				      unsigned char *, u32);
	int (*write_pending)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	int (*get_cmd_state)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	void (*queue_tm_rsp)(struct se_cmd *);
	void (*aborted_task)(struct se_cmd *);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
					  struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	void (*add_wwn_groups)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
						   const char *);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
				struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
				  struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
					    struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);

	struct configfs_attribute **tfc_discovery_attrs;
	struct configfs_attribute **tfc_wwn_attrs;
	struct configfs_attribute **tfc_tpg_base_attrs;
	struct configfs_attribute **tfc_tpg_np_base_attrs;
	struct configfs_attribute **tfc_tpg_attrib_attrs;
	struct configfs_attribute **tfc_tpg_auth_attrs;
	struct configfs_attribute **tfc_tpg_param_attrs;
	struct configfs_attribute **tfc_tpg_nacl_base_attrs;
	struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
	struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
	struct configfs_attribute **tfc_tpg_nacl_param_attrs;

	/*
	 * Set this member variable to true if the SCSI transport protocol
	 * (e.g. iSCSI) requires that the Data-Out buffer is transferred in
	 * its entirety before a command is aborted.
	 */
	bool write_pending_must_be_called;
};

int target_register_template(const struct target_core_fabric_ops *fo);
void target_unregister_template(const struct target_core_fabric_ops *fo);
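
/*
 * Minimal registration sketch (illustrative only, not taken from an
 * in-tree driver; the "my_fabric" names are hypothetical): a fabric
 * module fills in a static template with its name and callbacks (only a
 * subset is shown here; target_register_template() rejects templates
 * missing mandatory ops) and registers it at module load time:
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.module			= THIS_MODULE,
 *		.fabric_name		= "my_fabric",
 *		.tpg_get_wwn		= my_fabric_get_wwn,
 *		.tpg_get_tag		= my_fabric_get_tag,
 *		.release_cmd		= my_fabric_release_cmd,
 *		.write_pending		= my_fabric_write_pending,
 *		.queue_data_in		= my_fabric_queue_data_in,
 *		.queue_status		= my_fabric_queue_status,
 *		.queue_tm_rsp		= my_fabric_queue_tm_rsp,
 *		.aborted_task		= my_fabric_aborted_task,
 *	};
 *
 *	static int __init my_fabric_init(void)
 *	{
 *		return target_register_template(&my_fabric_ops);
 *	}
 *	module_init(my_fabric_init);
 *
 *	static void __exit my_fabric_exit(void)
 *	{
 *		target_unregister_template(&my_fabric_ops);
 *	}
 *	module_exit(my_fabric_exit);
 */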
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);

struct se_session *target_setup_session(struct se_portal_group *,
		unsigned int, unsigned int, enum target_prot_op prot_op,
		const char *, void *,
		int (*callback)(struct se_portal_group *,
				struct se_session *, void *));
void target_remove_session(struct se_session *);

int transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
		unsigned int);
void __transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
		struct se_node_acl *, struct se_session *, void *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_spc2_release(struct se_node_acl *nacl);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);


void transport_init_se_cmd(struct se_cmd *,
		const struct target_core_fabric_ops *,
		struct se_session *, u32, int, int, unsigned char *, u64);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *);
sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
		unsigned char *, unsigned char *, u64, u32, int, int, int,
		struct scatterlist *, u32, struct scatterlist *, u32,
		struct scatterlist *, u32);
int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
		unsigned char *, u64, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t, u64, int);
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);

void target_put_cmd_and_wait(struct se_cmd *cmd);
void target_execute_cmd(struct se_cmd *cmd);

int transport_generic_free_cmd(struct se_cmd *, int);

bool transport_wait_for_tasks(struct se_cmd *);
int transport_send_check_condition_and_sense(struct se_cmd *,
		sense_reason_t, int);
int target_send_busy(struct se_cmd *cmd);
int target_get_sess_cmd(struct se_cmd *, bool);
int target_put_sess_cmd(struct se_cmd *);
void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *);
void target_show_cmd(const char *pfx, struct se_cmd *cmd);

int core_alua_check_nonop_delay(struct se_cmd *);

int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
int transport_lookup_tmr_lun(struct se_cmd *);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
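
/*
 * Command submission sketch (illustrative only; "my_tpg", "my_conn" and
 * "my_cmd" stand in for hypothetical fabric-driver state): after a
 * successful login a fabric driver typically creates a session, then
 * hands each received CDB to target-core, which later completes the
 * command through the queue_data_in()/queue_status() callbacks above:
 *
 *	se_sess = target_setup_session(&my_tpg->se_tpg, 0, 0,
 *				       TARGET_PROT_NORMAL, initiator_name,
 *				       my_conn, NULL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *
 *	rc = target_submit_cmd(&my_cmd->se_cmd, se_sess, my_cmd->cdb,
 *			       my_cmd->sense_buf, my_cmd->unpacked_lun,
 *			       my_cmd->data_length, TCM_SIMPLE_TAG,
 *			       my_cmd->data_direction, TARGET_SCF_ACK_KREF);
 */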
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
		unsigned char *);
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
		const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
		unsigned char *);
int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
		struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);

int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length, bool zero_page, bool chainable);
void target_free_sgl(struct scatterlist *sgl, int nents);

/*
 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
 * that data is coming from the target (eg handling a READ). However,
 * this is just the opposite of what we have to tell the DMA mapping
 * layer -- eg when handling a READ, the HBA will have to DMA the data
 * out of memory so it can send it to the initiator, which means we
 * need to use DMA_TO_DEVICE when we map the data.
 */
static inline enum dma_data_direction
target_reverse_dma_direction(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_BIDI)
		return DMA_BIDIRECTIONAL;

	switch (se_cmd->data_direction) {
	case DMA_TO_DEVICE:
		return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:
		return DMA_TO_DEVICE;
	case DMA_NONE:
	default:
		return DMA_NONE;
	}
}

#endif /* TARGET_CORE_FABRIC_H */
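
/*
 * Usage sketch for target_reverse_dma_direction() (illustrative only;
 * "hba_dev" is a hypothetical struct device owned by the fabric driver):
 * when mapping a command's data scatterlist for HBA DMA, pass the
 * reversed direction to the DMA API rather than se_cmd->data_direction:
 *
 *	count = dma_map_sg(hba_dev, cmd->t_data_sg, cmd->t_data_nents,
 *			   target_reverse_dma_direction(cmd));
 *	if (!count)
 *		return -ENOMEM;
 */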