/*
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	struct rdma_cm_id	*ri_id;
	struct ib_pd		*ri_pd;
	struct ib_mr		*ri_bind_mem;
	u32			ri_dma_lkey;
	int			ri_have_dma_lkey;
	struct completion	ri_done;
	int			ri_async_rc;
	enum rpcrdma_memreg	ri_memreg_strategy;
	unsigned int		ri_max_frmr_depth;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

#define RPCRDMA_WC_BUDGET	(128)
#define RPCRDMA_POLLSIZE	(16)

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct rpcrdma_ia	*rep_ia;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t	rep_connect_wait;
	struct ib_sge		rep_pad;	/* holds zeroed pad */
	struct ib_mr		*rep_pad_mr;	/* holds zeroed pad */
	void			(*rep_func)(struct rpcrdma_ep *);
	struct rpc_xprt		*rep_xprt;	/* for rep_func */
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
	struct ib_wc		rep_send_wcs[RPCRDMA_POLLSIZE];
	struct ib_wc		rep_recv_wcs[RPCRDMA_POLLSIZE];
};

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
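/*
 * Usage sketch (added for illustration; not part of the original
 * interface): the two macros above implement a send-completion
 * signaling budget.  A caller might consume the budget on every send
 * and request a signaled completion only when it runs out, roughly as
 * below.  The helper name is hypothetical.
 */
static inline void
rpcrdma_example_set_send_flags(struct rpcrdma_ep *ep,
			       struct ib_send_wr *send_wr)
{
	if (DECR_CQCOUNT(ep) > 0) {
		/* budget remains: suppress the completion for this send */
		send_wr->send_flags = 0;
	} else {
		/* budget exhausted: reset it and ask for a completion */
		INIT_CQCOUNT(ep);
		send_wr->send_flags = IB_SEND_SIGNALED;
	}
}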

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

/* temporary static scatter/gather max */
#define RPCRDMA_MAX_DATA_SEGS	(64)	/* max scatter/gather */
#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */

#define MAX_RPCRDMAHDR	(\
	/* max supported RPC/RDMA header */ \
	sizeof(struct rpcrdma_msg) + (2 * sizeof(u32)) + \
	(sizeof(struct rpcrdma_read_chunk) * RPCRDMA_MAX_SEGS) + sizeof(u32))

struct rpcrdma_buffer;

struct rpcrdma_rep {
	unsigned int	rr_len;		/* actual received reply length */
	struct rpcrdma_buffer *rr_buffer; /* home base for this structure */
	struct rpc_xprt	*rr_xprt;	/* needed for request/reply matching */
	void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */
	struct list_head rr_list;	/* tasklet list */
	struct ib_sge	rr_iov;		/* for posting */
	struct ib_mr	*rr_handle;	/* handle for mem in rr_iov */
	char	rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
};
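/*
 * Usage sketch (added for illustration; not part of the original
 * interface): when a reply lands in rr_base, the transport can use the
 * XID at the front of the RPC/RDMA header to find the matching
 * outstanding request.  The helper name and the locking context shown
 * are assumptions, and the logic is simplified.
 */
static inline struct rpc_rqst *
rpcrdma_example_match_reply(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)rep->rr_base;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpc_rqst *rqst;

	if (rep->rr_len < sizeof(struct rpcrdma_msg))
		return NULL;	/* too short to carry a header */

	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);	/* NULL if unknown */
	spin_unlock(&xprt->transport_lock);
	return rqst;
}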

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 *   o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 *     marshal. The number needed varies depending on the iov lists that
 *     are passed to us, the memory registration mode we are in, and if
 *     physical addressing is used, the layout.
 */

struct rpcrdma_mr_seg {			/* chunk descriptors */
	union {				/* chunk memory handles */
		struct ib_mr	*rl_mr;		/* if registered directly */
		struct rpcrdma_mw {		/* if registered from region */
			union {
				struct ib_fmr	*fmr;
				struct {
					struct ib_fast_reg_page_list *fr_pgl;
					struct ib_mr *fr_mr;
					enum { FRMR_IS_INVALID, FRMR_IS_VALID } state;
				} frmr;
			} r;
			struct list_head mw_list;
		} *rl_mw;
	} mr_chunk;
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

struct rpcrdma_req {
	size_t		rl_size;	/* actual length of buffer */
	unsigned int	rl_niovs;	/* 0, 2 or 4 */
	unsigned int	rl_nchunks;	/* non-zero if chunks */
	unsigned int	rl_connect_cookie;	/* retry detection */
	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
	struct ib_sge	rl_send_iov[4];	/* for active requests */
	struct ib_sge	rl_iov;		/* for posting */
	struct ib_mr	*rl_handle;	/* handle for mem in rl_iov */
	char		rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
	__u32		rl_xdr_buf[0];	/* start of returned rpc rq_buffer */
};
#define rpcr_to_rdmar(r) \
	container_of((r)->rq_buffer, struct rpcrdma_req, rl_xdr_buf[0])

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t	rb_lock;	/* protects indexes */
	atomic_t	rb_credits;	/* most recent server credits */
	int		rb_max_requests;/* client max requests */
	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
	int		rb_send_index;
	struct rpcrdma_req	**rb_send_bufs;
	int		rb_recv_index;
	struct rpcrdma_rep	**rb_recv_bufs;
	char		*rb_pool;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
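/*
 * Usage sketch (added for illustration; not part of the original
 * interface): rb_send_bufs[] and rb_recv_bufs[] behave as simple
 * indexed pools guarded by rb_lock.  A consumer might take the next
 * free send buffer roughly as below; the real rpcrdma_buffer_get()
 * also pairs it with a receive buffer and memory windows.  The helper
 * name is hypothetical and the logic is simplified.
 */
static inline struct rpcrdma_req *
rpcrdma_example_get_send_buf(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&buf->rb_lock, flags);
	if (buf->rb_send_index < buf->rb_max_requests) {
		req = buf->rb_send_bufs[buf->rb_send_index];
		buf->rb_send_bufs[buf->rb_send_index++] = NULL;
	}
	spin_unlock_irqrestore(&buf->rb_lock, flags);
	return req;
}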

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_xprt).padding

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
};

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rdma_connect;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
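/*
 * Usage sketch (added for illustration; not part of the original
 * interface): the inline threshold macros above drive the marshaling
 * decision -- a send buffer that fits under the write threshold can go
 * inline in the RPC/RDMA message, anything larger needs chunks.  The
 * helper name is hypothetical.
 */
static inline int
rpcrdma_example_fits_inline(struct rpc_rqst *rqst)
{
	return rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
}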

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
int rpcrdma_buffer_create(struct rpcrdma_buffer *, struct rpcrdma_ep *,
				struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

int rpcrdma_register_internal(struct rpcrdma_ia *, void *, int,
				struct ib_mr **, struct ib_sge *);
int rpcrdma_deregister_internal(struct rpcrdma_ia *,
				struct ib_mr *, struct ib_sge *);

int rpcrdma_register_external(struct rpcrdma_mr_seg *,
				int, int, struct rpcrdma_xprt *);
int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
				struct rpcrdma_xprt *);

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);

/* Temporary NFS request map cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_map_cachep;
/* WR context cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
/* Workqueue created in svc_rdma.c */
extern struct workqueue_struct *svc_rdma_wq;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */