/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>             /* wait_queue_head_t, etc */
#include <linux/spinlock.h>         /* spinlock_t, etc */
#include <linux/atomic.h>           /* atomic_t, etc */
#include <linux/workqueue.h>        /* struct work_struct */

#include <rdma/rdma_cm.h>           /* RDMA connection api */
#include <rdma/ib_verbs.h>          /* RDMA verbs api */

#include <linux/sunrpc/clnt.h>      /* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h>  /* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>  /* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT    (5000)  /* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX  (2)     /* retries if no listener backlog */

#define RPCRDMA_BIND_TO         (60U * HZ)
#define RPCRDMA_INIT_REEST_TO   (5U * HZ)
#define RPCRDMA_MAX_REEST_TO    (30U * HZ)
#define RPCRDMA_IDLE_DISC_TO    (5U * 60 * HZ)

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	struct ib_device	*ri_device;
	struct rdma_cm_id	*ri_id;
	struct ib_pd		*ri_pd;
	struct completion	ri_done;
	int			ri_async_rc;
	unsigned int		ri_max_frmr_depth;
	unsigned int		ri_max_inline_write;
	unsigned int		ri_max_inline_read;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t	rep_connect_wait;
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
};

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS	(8)
#else
#define RPCRDMA_BACKWARD_WRS	(0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */

struct rpcrdma_regbuf {
	size_t			rg_size;
	struct rpcrdma_req	*rg_owner;
	struct ib_sge		rg_iov;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}
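/* Illustrative sketch (not part of this header's API): the accessors
 * above exist so callers can fill in an ib_sge for a Send or Receive
 * without touching rg_iov directly. A posting path might do, roughly:
 *
 *	struct ib_sge sge;
 *
 *	sge.addr   = rdmab_addr(rb);
 *	sge.length = rdmab_length(rb);
 *	sge.lkey   = rdmab_lkey(rb);
 *
 * where "rb" (a hypothetical local) is a regbuf returned by
 * rpcrdma_alloc_regbuf(), declared later in this header.
 */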
#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the header is another 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
#define RPCRDMA_MAX_HDR_SEGS	(8)

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by reply handler (list, etc)
 *
 * These are allocated during initialization, per-transport instance.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	unsigned int		rr_len;
	struct ib_device	*rr_device;
	struct rpcrdma_xprt	*rr_rxprt;
	struct work_struct	rr_work;
	struct list_head	rr_list;
	struct rpcrdma_regbuf	*rr_rdmabuf;
};

#define RPCRDMA_BAD_LEN		(~0U)

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_IS_STALE,		/* failed completion */
};

struct rpcrdma_frmr {
	struct ib_mr			*fr_mr;
	struct ib_cqe			fr_cqe;
	enum rpcrdma_frmr_state		fr_state;
	struct completion		fr_linv_done;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_fmr {
	struct ib_fmr		*fm_mr;
	u64			*fm_physaddrs;
};

struct rpcrdma_mw {
	struct list_head	mw_list;
	struct scatterlist	*mw_sg;
	int			mw_nents;
	enum dma_data_direction	mw_dir;
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
	};
	struct rpcrdma_xprt	*mw_xprt;
	u32			mw_handle;
	u32			mw_length;
	u64			mw_offset;
	struct list_head	mw_all;
};
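/* Illustrative sketch of the MW lifecycle (hypothetical local names;
 * the calls are the ones declared in this header and in struct
 * rpcrdma_memreg_ops below):
 *
 *	struct rpcrdma_mw *mw;
 *
 *	mw = rpcrdma_get_mw(r_xprt);       // take a free MW off rb_mws
 *	...ri_ops->ro_map(...);            // register a segment; fills in
 *	                                   // mw_handle/mw_length/mw_offset
 *	...ro_unmap_sync()/ro_unmap_safe() // invalidate at RPC completion
 *	rpcrdma_put_mw(r_xprt, mw);        // return the MW to the free list
 *
 * A failed invalidation leaves an FRMR in FRMR_IS_STALE;
 * rpcrdma_defer_mr_recovery() hands such an MW to the recovery worker.
 */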
/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 */

/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};

struct rpcrdma_mr_seg {			/* chunk descriptors */
	u32		mr_len;		/* length of chunk or segment */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

#define RPCRDMA_MAX_IOVS	(2)

struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head	rl_free;
	unsigned int		rl_niovs;
	unsigned int		rl_connect_cookie;
	struct rpc_task		*rl_task;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;	/* holder for reply buffer */
	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
	struct rpcrdma_regbuf	*rl_rdmabuf;
	struct rpcrdma_regbuf	*rl_sendbuf;

	struct ib_cqe		rl_cqe;
	struct list_head	rl_all;
	bool			rl_backchannel;

	struct list_head	rl_registered;	/* registered segments */
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	void *buffer = rqst->rq_buffer;
	struct rpcrdma_regbuf *rb;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
	return rb->rg_owner;
}

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mwlock;	/* protect rb_mws list */
	struct list_head	rb_mws;
	struct list_head	rb_all;
	char			*rb_pool;

	spinlock_t		rb_lock;	/* protect buf lists */
	struct list_head	rb_send_bufs;
	struct list_head	rb_recv_bufs;
	u32			rb_max_requests;
	atomic_t		rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
	struct list_head	rb_allreqs;

	u32			rb_bc_max_requests;

	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head	rb_stale_mrs;
	struct delayed_work	rb_recovery_worker;
	struct delayed_work	rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
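/* Illustrative note on rpcr_to_rdmar() above (a sketch, not new API):
 * it relies on rq_buffer pointing at the rg_base[] flexible array of a
 * struct rpcrdma_regbuf, so container_of() can walk back to the regbuf
 * header, whose rg_owner field locates the owning rpcrdma_req:
 *
 *	kmalloc'd region: [ struct rpcrdma_regbuf | RPC send buffer ... ]
 *	                                            ^ rqst->rq_buffer
 *
 * A buffer not allocated this way must never be passed to
 * rpcr_to_rdmar().
 */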
/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_xprt).padding

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
	unsigned long		mrs_recovered;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int		(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool,
				  struct rpcrdma_mw **);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *);
	void		(*ro_unmap_safe)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *, bool);
	void		(*ro_recover_mr)(struct rpcrdma_mw *);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init_mr)(struct rpcrdma_ia *,
				      struct rpcrdma_mw *);
	void		(*ro_release_mr)(struct rpcrdma_mw *);
	const char	*ro_displayname;
};

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
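/* Illustrative sketch: the ops table above lets marshaling code stay
 * registration-mode agnostic. A caller holding a struct rpcrdma_xprt
 * pointer (hypothetical local "r_xprt") dispatches roughly like this:
 *
 *	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 *
 *	maxpages = ia->ri_ops->ro_maxpages(r_xprt);
 *	rc = ia->ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
 *
 * with ri_ops set to either rpcrdma_fmr_memreg_ops or
 * rpcrdma_frwr_memreg_ops when the transport is set up.
 */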
/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
bool fmr_is_supported(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
					    size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
			 struct rpcrdma_regbuf *);

int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);

int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
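/* Illustrative note on rpcrdma_data_dir() (a sketch, not new API):
 * "writing" means the remote peer will RDMA Write into this memory,
 * i.e. the device deposits data for the CPU to read, hence
 * DMA_FROM_DEVICE. Mapping a segment destined for a Write chunk might
 * look roughly like:
 *
 *	mw->mw_dir = rpcrdma_data_dir(true);    // DMA_FROM_DEVICE
 *	ib_dma_map_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
 *
 * while memory advertised in a Read chunk (the peer reads from us)
 * maps with rpcrdma_data_dir(false), i.e. DMA_TO_DEVICE.
 */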
/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);
void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *,
				  struct rpcrdma_create_data_internal *,
				  unsigned int);

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */