1 /* 2 * iSER transport for the Open iSCSI Initiator & iSER transport internals 3 * 4 * Copyright (C) 2004 Dmitry Yusupov 5 * Copyright (C) 2004 Alex Aizman 6 * Copyright (C) 2005 Mike Christie 7 * based on code maintained by open-iscsi@googlegroups.com 8 * 9 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 11 * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 12 * 13 * This software is available to you under a choice of one of two 14 * licenses. You may choose to be licensed under the terms of the GNU 15 * General Public License (GPL) Version 2, available from the file 16 * COPYING in the main directory of this source tree, or the 17 * OpenIB.org BSD license below: 18 * 19 * Redistribution and use in source and binary forms, with or 20 * without modification, are permitted provided that the following 21 * conditions are met: 22 * 23 * - Redistributions of source code must retain the above 24 * copyright notice, this list of conditions and the following 25 * disclaimer. 26 * 27 * - Redistributions in binary form must reproduce the above 28 * copyright notice, this list of conditions and the following 29 * disclaimer in the documentation and/or other materials 30 * provided with the distribution. 31 * 32 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 33 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 34 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 35 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 36 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 37 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 38 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 39 * SOFTWARE. 
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.1"

/*
 * Logging helpers. Verbosity is gated on the global iser_debug_level
 * (declared extern below): 0 = errors only, >0 adds warnings, >1 adds
 * info, >2 adds debug.
 */
#define iser_dbg(fmt, arg...)				\
	do {						\
		if (iser_debug_level > 2)		\
			printk(KERN_DEBUG PFX "%s:" fmt,\
			       __func__ , ## arg);	\
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (iser_debug_level > 0)		\
			pr_warn(PFX "%s:" fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (iser_debug_level > 1)		\
			pr_info(PFX "%s:" fmt,		\
				__func__ , ## arg);	\
	} while (0)

/* Errors are always printed, regardless of iser_debug_level */
#define iser_err(fmt, arg...)				\
	do {						\
		printk(KERN_ERR PFX "%s:" fmt,		\
		       __func__ , ## arg);		\
	} while (0)

/* 4K page granularity used when building page vectors for registration */
#define SHIFT_4K	12
#define SIZE_4K	(1ULL << SHIFT_4K)
#define MASK_4K	(~(SIZE_4K-1))

/* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE		(0x80000 >> SHIFT_4K)
#define ISER_DEF_XMIT_CMDS_DEFAULT	512
/* pick the larger of libiscsi's default and iSER's own default */
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2) , ASYNC_EVENT(2) */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),	  *
					   * SCSI_TMFUNC(2), LOGOUT(1)    */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

/* a quarter of the RX ring; presumably the low watermark below which
 * receives are re-posted — see iser_post_recvm usage to confirm */
#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)

/* the max TX (send) WR supported by the iSER QP is defined by                 *
 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect   *
 * to have at max for SCSI command. The tx posting & completion handling code  *
 * supports -EAGAIN scheme where tx is suspended till the QP has room for more *
 * send WR. D=8 comes from 64K/8K                                              */

#define ISER_INFLIGHT_DATAOUTS		8

#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *    \
					(1 + ISER_INFLIGHT_DATAOUTS) + \
					ISER_MAX_TX_MISC_PDUS        + \
					ISER_MAX_RX_MISC_PDUS)

/* iSER header 'flags' byte: version nibble plus STag-valid bits */
#define ISER_VER	0x10
#define ISER_WSV	0x08
#define ISER_RSV	0x04

/* on-the-wire iSER header that precedes each iSCSI PDU (packed, big-endian) */
struct iser_hdr {
	u8      flags;
	u8      rsvd[3];
	__be32  write_stag; /* write rkey */
	__be64  write_va;
	__be32  read_stag;  /* read rkey */
	__be64  read_va;
} __attribute__((packed));


#define ISER_ZBVA_NOT_SUPPORTED		0x80
#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40

/* capability flags carried in CM private data — NOTE(review): assumed to
 * be exchanged during connection establishment; verify against the CM code */
struct iser_cm_hdr {
	u8      flags;
	u8      rsvd[3];
} __packed;

/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE	64

/* RDMA (IB) connection state machine */
enum iser_ib_conn_state {
	ISER_CONN_INIT,		   /* descriptor allocd, no conn          */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator   */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/* describes the data buffer (SG list) of one direction of a SCSI command */
struct iser_data_buf {
	void               *buf;      /* pointer to the sg list       */
	unsigned int       size;      /* num entries of this sg       */
	unsigned long      data_len;  /* total data len               */
	unsigned int       dma_nents; /* returned by dma_map_sg       */
	char               *copy_buf; /* allocated copy buf for SGs unaligned *
				       * for rdma which are copied            */
	struct scatterlist sg_single; /* SG-ified clone of a non SG SC or     *
				       * unaligned SG                          */
};

/* fwd declarations */
struct iser_device;
struct iser_cq_desc;
struct iscsi_iser_conn;
struct iscsi_iser_task;
struct iscsi_endpoint;

/* result of a memory registration: local/remote keys + mapped VA/length */
struct iser_mem_reg {
	u32  lkey;
	u32  rkey;
	u64  va;
	u64  len;
	void *mem_h;	/* opaque registration handle */
	int  is_mr;	/* NOTE(review): looks like a "handle needs unreg"
			 * flag — confirm against the unreg paths */
};

struct iser_regd_buf {
	struct iser_mem_reg     reg;       /* memory registration info     */
	void                    *virt_addr;
	struct iser_device      *device;   /* device->device for dma_unmap */
	enum dma_data_direction direction; /* direction for dma_unmap      */
	unsigned int            data_size;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL ,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/* TX descriptor: headers sent inline ahead of the (optional) payload */
struct iser_tx_desc {
	struct iser_hdr       iser_header;
	struct iscsi_hdr      iscsi_header;
	enum   iser_desc_type type;
	u64		      dma_addr;
	/* sg[0] points to iser/iscsi headers, sg[1] optionally points to either
	   of immediate data, unsolicited data-out or control (login,text) */
	struct ib_sge	      tx_sg[2];
	int                   num_sge;
};

/* pad so each RX descriptor occupies exactly 256 bytes */
#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
					sizeof(u64) + sizeof(struct ib_sge)))
struct iser_rx_desc {
	struct iser_hdr  iser_header;
	struct iscsi_hdr iscsi_header;
	char		 data[ISER_RECV_DATA_SEG_LEN];
	u64		 dma_addr;
	struct ib_sge	 rx_sg;
	char		 pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));

#define ISER_MAX_CQ 4

struct iser_conn;
struct iscsi_iser_task;

/* per-IB-device state, shared by every iSER connection on that device */
struct iser_device {
	struct ib_device        *ib_device;
	struct ib_pd	        *pd;
	struct ib_cq	        *rx_cq[ISER_MAX_CQ];
	struct ib_cq	        *tx_cq[ISER_MAX_CQ];
	struct ib_mr	        *mr;
	struct tasklet_struct   cq_tasklet[ISER_MAX_CQ];
	struct ib_event_handler event_handler;
	struct list_head        ig_list;	/* entry in ig devices list */
	int                     refcount;
	int                     cq_active_qps[ISER_MAX_CQ];
	int	                cqs_used;
	struct iser_cq_desc	*cq_desc;
	/* registration-scheme ops — presumably set to the FMR or FRWR
	 * implementation per device capabilities; verify at assignment site */
	int  (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
					unsigned cmds_max);
	void (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
	int  (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
				  enum iser_data_dir cmd_dir);
	void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
				    enum iser_data_dir cmd_dir);
};

struct fast_reg_descriptor {
	struct list_head	     list;
	/* For fast registration - FRWR */
	struct ib_mr		     *data_mr;
	struct ib_fast_reg_page_list *data_frpl;
	/* Valid for fast registration flag */
	bool			     valid;
};

/* iSER RDMA connection context (IB side of one iSCSI connection) */
struct iser_conn {
	struct iscsi_iser_conn *iser_conn;  /* iser conn for upcalls   */
	struct iscsi_endpoint  *ep;
	enum iser_ib_conn_state state;	    /* rdma connection state   */
	atomic_t	        refcount;
	spinlock_t	        lock;	    /* used for state changes  */
	struct iser_device     *device;     /* device context          */
	struct rdma_cm_id      *cma_id;     /* CMA ID                  */
	struct ib_qp	       *qp;         /* QP                      */
	wait_queue_head_t       wait;       /* waitq for conn/disconn  */
	unsigned	        qp_max_recv_dtos;      /* num of rx buffers */
	unsigned	        qp_max_recv_dtos_mask; /* above minus 1     */
	unsigned	        min_posted_rx;         /* qp_max_recv_dtos >> 2 */
	int                     post_recv_buf_count;   /* posted rx count   */
	atomic_t                post_send_buf_count;   /* posted tx count   */
	char 		        name[ISER_OBJECT_NAME_SIZE];
	struct list_head        conn_list;  /* entry in ig conn list   */

	/* login exchange buffers and their DMA addresses */
	char  		        *login_buf;
	char		        *login_req_buf, *login_resp_buf;
	u64 		        login_req_dma, login_resp_dma;
	unsigned int 	        rx_desc_head;
	struct iser_rx_desc     *rx_descs;
	struct ib_recv_wr       rx_wr[ISER_MIN_POSTED_RX];
	/* registration resources; the live member matches the scheme the
	 * device's iser_*_rdma_* ops use (FMR pool vs FRWR descriptor list) */
	union {
		struct {
			struct ib_fmr_pool   *pool;	/* pool of IB FMRs */
			struct iser_page_vec *page_vec; /* represents SG to fmr maps*
							 * maps serialized as tx is*/
		} fmr;
		struct {
			struct list_head pool;
			int		 pool_size;
		} frwr;
	} fastreg;
};

/* glue binding an iscsi_conn to its iSER IB connection */
struct iscsi_iser_conn {
	struct iscsi_conn *iscsi_conn; /* ptr to iscsi conn */
	struct iser_conn  *ib_conn;    /* iSER IB conn      */
};

/* per-task (SCSI command) iSER state, one data set per direction */
struct iscsi_iser_task {
	struct iser_tx_desc    desc;
	struct iscsi_iser_conn *iser_conn;
	enum iser_task_status  status;
	int                    command_sent;	      /* set if command sent */
	int                    dir[ISER_DIRS_NUM];    /* set if dir use      */
	struct iser_regd_buf   rdma_regd[ISER_DIRS_NUM];/* regd rdma buf     */
	struct iser_data_buf   data[ISER_DIRS_NUM];   /* orig. data des      */
	struct iser_data_buf   data_copy[ISER_DIRS_NUM];/* contig. copy      */
};

/* page list handed to iser_reg_page_vec for FMR registration */
struct iser_page_vec {
	u64 *pages;
	int length;
	int offset;
	int data_size;
};

/* maps a CQ index back to its owning device (tasklet context cookie) */
struct iser_cq_desc {
	struct iser_device *device;
	int                cq_index;
};

/* driver-global state: device/connection lists and the TX descriptor cache */
struct iser_global {
	struct mutex      device_list_mutex;/*                          */
	struct list_head  device_list;	    /* all iSER devices         */
	struct mutex      connlist_mutex;
	struct list_head  connlist;	    /* all iSER IB connections  */

	struct kmem_cache *desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;

/* allocate connection resources needed for rdma functionality */
int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);

/* PDU send paths: control (nop/text/login/etc.), SCSI command, data-out */
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

/* deliver a received PDU (header + optional data) up to libiscsi */
void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

/* connection lifetime: init, ref get/put (put may destroy), terminate */
void iser_conn_init(struct iser_conn *ib_conn);

void iser_conn_get(struct iser_conn *ib_conn);

int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed);

void iser_conn_terminate(struct iser_conn *ib_conn);

/* RX completion handler invoked from CQ processing */
void iser_rcv_completion(struct iser_rx_desc *desc,
			 unsigned long dto_xfer_len,
			 struct iser_conn *ib_conn);

/* TX completion handler invoked from CQ processing */
void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);

/* per-task RDMA resource setup / teardown */
void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *ib_conn);

/* copy back / release the bounce buffer used for RDMA-unaligned SG lists */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
				     enum iser_data_dir cmd_dir);

/* RDMA memory registration, FMR and FRWR flavors (see iser_device ops) */
int  iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
			   enum iser_data_dir cmd_dir);
int  iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task,
			    enum iser_data_dir cmd_dir);

int  iser_connect(struct iser_conn *ib_conn,
		  struct sockaddr_in *src_addr,
		  struct sockaddr_in *dst_addr,
		  int non_blocking);

int  iser_reg_page_vec(struct iser_conn *ib_conn,
		       struct iser_page_vec *page_vec,
		       struct iser_mem_reg *mem_reg);

void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir);
void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
			 enum iser_data_dir cmd_dir);

/* post receive WRs — recvl presumably posts the login buffer, recvm posts
 * 'count' main-ring descriptors; confirm against the .c implementation */
int  iser_post_recvl(struct iser_conn *ib_conn);
int  iser_post_recvm(struct iser_conn *ib_conn, int count);
int  iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);

/* DMA-map / unmap a task's data buffers for the given direction */
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
int  iser_initialize_task_headers(struct iscsi_task *task,
				  struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
/* registration-pool lifetime per scheme (FMR vs FRWR) */
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct iser_conn *ib_conn);
int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_frwr_pool(struct iser_conn *ib_conn);
#endif /* __ISCSI_ISER_H__ */