/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/srp.h>	/* struct srp_cmd, struct srp_imm_buf */

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS = 1000,
	SRP_ABORT_TIMEOUT_MS = 5000,

	SRP_PORT_REDIRECT = 1,
	SRP_DLID_REDIRECT = 2,
	SRP_STALE_CONN = 3,

	SRP_DEF_SG_TABLESIZE = 12,

	SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
	SRP_RSP_SQ_SIZE = 1,
	SRP_TSK_MGMT_SQ_SIZE = 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ = ~0U,
	SRP_TAG_TSK_MGMT = 1U << 31,

	SRP_MAX_PAGES_PER_MR = 512,

	SRP_MAX_ADD_CDB_LEN = 16,

	SRP_MAX_IMM_SGE = 2,
	SRP_MAX_SGE = SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET = sizeof(struct srp_cmd) +
			      SRP_MAX_ADD_CDB_LEN +
			      sizeof(struct srp_imm_buf),
};
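
/*
 * Layout sketch for the immediate-data offset above (illustrative, assuming
 * the struct srp_cmd and struct srp_imm_buf definitions from <scsi/srp.h>):
 * struct srp_cmd itself carries a 16-byte CDB field and SRP_MAX_ADD_CDB_LEN
 * reserves another 16 bytes, so a 32-byte CDB still fits in front of the
 * immediate data descriptor and its payload:
 *
 *	| struct srp_cmd | additional CDB | struct srp_imm_buf | immediate data
 *	|<------------- SRP_IMM_DATA_OFFSET ------------------>|
 */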

enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *               request.
 */
struct srp_device {
	struct list_head dev_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	u32 global_rkey;
	u64 mr_page_mask;
	int mr_page_size;
	int mr_max_size;
	int max_pages_per_mr;
	bool has_fmr;
	bool has_fr;
	bool use_fmr;
	bool use_fast_reg;
};

struct srp_host {
	struct srp_device *srp_dev;
	u8 port;
	struct device dev;
	struct list_head target_list;
	spinlock_t target_lock;
	struct completion released;
	struct list_head list;
	struct mutex add_target_mutex;
};

struct srp_request {
	struct scsi_cmnd *scmnd;
	struct srp_iu *cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64 *map_page;
	struct srp_direct_buf *indirect_desc;
	dma_addr_t indirect_dma_addr;
	short nmdesc;
	struct ib_cqe reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head free_tx;
	spinlock_t lock;
	s32 req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_qp *qp;
	union {
		struct ib_fmr_pool *fmr_pool;
		struct srp_fr_pool *fr_pool;
	};
	uint32_t max_it_iu_len;
	uint32_t max_ti_iu_len;
	u8 max_imm_sge;
	bool use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion done;
	int status;

	union {
		struct ib_cm {
			struct sa_path_rec path;
			struct ib_sa_query *path_query;
			int path_query_id;
			struct ib_cm_id *cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id *cm_id;
		} rdma_cm;
	};

	struct srp_iu **tx_ring;
	struct srp_iu **rx_ring;
	struct srp_request *req_ring;
	int comp_vector;

	u64 tsk_mgmt_tag;
	struct completion tsk_mgmt_done;
	u8 tsk_mgmt_status;
	bool connected;
};
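
/*
 * Sketch of how the hot-path members above are used together when a send IU
 * is allocated (illustrative only; the helper name below is not declared by
 * this header). Command and task management IUs each consume one
 * target-granted credit (req_lim); response IUs do not:
 *
 *	// caller must hold ch->lock
 *	static struct srp_iu *srp_get_tx_iu(struct srp_rdma_ch *ch,
 *					    enum srp_iu_type type)
 *	{
 *		struct srp_iu *iu;
 *
 *		if (list_empty(&ch->free_tx))
 *			return NULL;
 *		if (type != SRP_IU_RSP) {
 *			if (ch->req_lim <= 0)
 *				return NULL;
 *			--ch->req_lim;
 *		}
 *		iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
 *		list_del(&iu->list);
 *		return iu;
 *	}
 */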

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *               this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t lock;

	/* read only in the hot path */
	u32 global_rkey;
	struct srp_rdma_ch *ch;
	struct net *net;
	u32 ch_count;
	u32 lkey;
	enum srp_target_state state;
	uint32_t max_it_iu_size;
	unsigned int cmd_sg_cnt;
	unsigned int indirect_size;
	bool allow_ext_sg;

	/* other member variables */
	union ib_gid sgid;
	__be64 id_ext;
	__be64 ioc_guid;
	__be64 initiator_ext;
	u16 io_class;
	struct srp_host *srp_host;
	struct Scsi_Host *scsi_host;
	struct srp_rport *rport;
	char target_name[32];
	unsigned int scsi_id;
	unsigned int sg_tablesize;
	unsigned int target_can_queue;
	int mr_pool_size;
	int mr_per_cmd;
	int queue_size;
	int req_ring_size;
	int comp_vector;
	int tl_retry_count;

	bool using_rdma_cm;

	union {
		struct {
			__be64 service_id;
			union ib_gid orig_dgid;
			__be16 pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in ip4;
				struct sockaddr_in6 ip6;
				struct sockaddr sa;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in ip4;
				struct sockaddr_in6 ip6;
				struct sockaddr sa;
				struct sockaddr_storage ss;
			} dst;
			bool src_specified;
		} rdma_cm;
	};

	u32 rq_tmo_jiffies;

	int zero_req_lim;

	struct work_struct tl_err_work;
	struct work_struct remove_work;

	struct list_head list;
	bool qp_in_error;
};

struct srp_iu {
	struct list_head list;
	u64 dma;
	void *buf;
	size_t size;
	enum dma_data_direction direction;
	u32 num_sge;
	struct ib_sge sge[SRP_MAX_SGE];
	struct ib_cqe cqe;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
	struct list_head entry;
	struct ib_mr *mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:              Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:              Protects free_list.
 * @free_list:         List of free descriptors.
 * @desc:              Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int size;
	int max_page_list_len;
	spinlock_t lock;
	struct list_head free_list;
	struct srp_fr_desc desc[];
};
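
/*
 * Sketch of how a registration descriptor is taken from the pool above
 * (illustrative only; the helper name below is not declared by this header).
 * Only descriptors on @free_list are available, and @lock serializes access:
 *
 *	static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
 *	{
 *		struct srp_fr_desc *d = NULL;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&pool->lock, flags);
 *		if (!list_empty(&pool->free_list)) {
 *			d = list_first_entry(&pool->free_list,
 *					     struct srp_fr_desc, entry);
 *			list_del(&d->entry);
 *		}
 *		spin_unlock_irqrestore(&pool->lock, flags);
 *		return d;
 *	}
 */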

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:          Pointer to the element of the SRP buffer descriptor array
 *                 that is being filled in.
 * @pages:         Array with DMA addresses of pages being considered for
 *                 memory registration.
 * @base_dma_addr: DMA address of the first page that has not yet been mapped.
 * @dma_len:       Number of bytes that will be registered with the next
 *                 FMR or FR memory registration call.
 * @total_len:     Total number of bytes in the sg-list being mapped.
 * @npages:        Number of page addresses in the pages[] array.
 * @nmdesc:        Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:         Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct ib_pool_fmr **next;
			struct ib_pool_fmr **end;
		} fmr;
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void **next;
			void **end;
		} gen;
	};
	struct srp_direct_buf *desc;
	union {
		u64 *pages;
		struct scatterlist *sg;
	};
	dma_addr_t base_dma_addr;
	u32 dma_len;
	u32 total_len;
	unsigned int npages;
	unsigned int nmdesc;
	unsigned int ndesc;
};

#endif /* IB_SRP_H */