Lines matching "rdma" (full-text search) in net/sunrpc/xprtrdma/svc_rdma_rw.c
5 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
8 #include <rdma/rw.h>
20 /* Each R/W context contains state for one chain of RDMA Read or Write Work Requests.
27 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment from a client may contain a unique R_key, so each WR chain moves at most one segment at a time.
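The comment fragments above describe the per-chain R/W context: one context covers a single chain of RDMA Read or Write Work Requests, and each chain targets exactly one R_key, i.e. at most one RPC-over-RDMA segment at a time. A minimal user-space sketch of that idea follows; every field name here is illustrative and does not claim to match the kernel's actual context structure.

#include <stdint.h>

/* Illustrative only: one context per WR chain, one R_key per chain. */
struct rw_segment_ctxt {
        struct rw_segment_ctxt *next;   /* free-list / chain linkage */
        uint32_t rkey;                  /* the single R_key this chain uses */
        uint64_t remote_offset;         /* where the segment begins remotely */
        uint32_t length;                /* bytes this chain moves */
        unsigned int nents;             /* scatter/gather entries in use */
};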
54 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) in svc_rdma_get_rw_ctxt() argument
59 spin_lock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
60 node = llist_del_first(&rdma->sc_rw_ctxts); in svc_rdma_get_rw_ctxt()
61 spin_unlock(&rdma->sc_rw_ctxt_lock); in svc_rdma_get_rw_ctxt()
66 GFP_KERNEL, ibdev_to_node(rdma->sc_cm_id->device)); in svc_rdma_get_rw_ctxt()
83 trace_svcrdma_no_rwctx_err(rdma, sges); in svc_rdma_get_rw_ctxt()
94 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, in svc_rdma_put_rw_ctxt() argument
97 __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts); in svc_rdma_put_rw_ctxt()
102 * @rdma: transport about to be destroyed
105 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) in svc_rdma_destroy_rw_ctxts() argument
110 while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) { in svc_rdma_destroy_rw_ctxts()
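Taken together, svc_rdma_get_rw_ctxt(), svc_rdma_put_rw_ctxt(), and svc_rdma_destroy_rw_ctxts() implement an on-demand cache: a context is popped from the transport's free list under sc_rw_ctxt_lock, with a fallback allocation on the RDMA device's NUMA node, pushed back onto the list when released, and only actually freed when the transport is destroyed. Below is a simplified, compilable user-space model of that lifecycle; a mutex-protected singly linked list and calloc() stand in for the kernel's llist and node-aware allocation.

#include <pthread.h>
#include <stdlib.h>

struct rw_ctxt {
        struct rw_ctxt *next;
};

struct ctxt_cache {
        pthread_mutex_t lock;
        struct rw_ctxt *free_list;
};

static struct rw_ctxt *ctxt_get(struct ctxt_cache *c)
{
        struct rw_ctxt *ctxt;

        pthread_mutex_lock(&c->lock);
        ctxt = c->free_list;
        if (ctxt)
                c->free_list = ctxt->next;
        pthread_mutex_unlock(&c->lock);

        if (!ctxt)
                ctxt = calloc(1, sizeof(*ctxt));        /* fallback allocation */
        return ctxt;
}

static void ctxt_put(struct ctxt_cache *c, struct rw_ctxt *ctxt)
{
        pthread_mutex_lock(&c->lock);
        ctxt->next = c->free_list;
        c->free_list = ctxt;
        pthread_mutex_unlock(&c->lock);
}

/* Called only at transport teardown, when no other users remain. */
static void ctxt_destroy_all(struct ctxt_cache *c)
{
        struct rw_ctxt *ctxt;

        while ((ctxt = c->free_list) != NULL) {
                c->free_list = ctxt->next;
                free(ctxt);
        }
}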
118 * @rdma: controlling transport instance
120 * @offset: RDMA offset
121 * @handle: RDMA tag/handle
127 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, in svc_rdma_rw_ctx_init() argument
134 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, in svc_rdma_rw_ctx_init()
138 svc_rdma_put_rw_ctxt(rdma, ctxt); in svc_rdma_rw_ctx_init()
139 trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret); in svc_rdma_rw_ctx_init()
163 static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_cid_init() argument
166 cid->ci_queue_id = rdma->sc_sq_cq->res.id; in svc_rdma_cc_cid_init()
167 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids); in svc_rdma_cc_cid_init()
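svc_rdma_cc_cid_init() stamps each chunk transfer with a completion ID: ci_queue_id comes from the send CQ's resource ID and ci_completion_id from a per-transport atomic counter, so tracepoints can correlate posted work with its later completion. A small sketch using C11 atomics; only the two ci_* field names come from the lines above, everything else is a stand-in.

#include <stdatomic.h>
#include <stdint.h>

struct completion_cid {
        uint32_t ci_queue_id;           /* identifies the completion queue */
        uint32_t ci_completion_id;      /* increases monotonically per transport */
};

static void cc_cid_init(struct completion_cid *cid, uint32_t cq_resource_id,
                        atomic_uint *completion_counter)
{
        cid->ci_queue_id = cq_resource_id;
        /* atomic_fetch_add() + 1 mimics the kernel's atomic_inc_return() */
        cid->ci_completion_id = atomic_fetch_add(completion_counter, 1) + 1;
}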
170 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma, in svc_rdma_cc_init() argument
173 svc_rdma_cc_cid_init(rdma, &cc->cc_cid); in svc_rdma_cc_init()
174 cc->cc_rdma = rdma; in svc_rdma_cc_init()
188 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_cc_release() local
199 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_cc_release()
200 rdma->sc_port_num, ctxt->rw_sg_table.sgl, in svc_rdma_cc_release()
210 llist_add_batch(first, last, &rdma->sc_rw_ctxts); in svc_rdma_cc_release()
233 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, in svc_rdma_write_info_alloc() argument
239 ibdev_to_node(rdma->sc_cm_id->device)); in svc_rdma_write_info_alloc()
246 svc_rdma_cc_init(rdma, &info->wi_cc); in svc_rdma_write_info_alloc()
269 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_write_done() local
284 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount); in svc_rdma_write_done()
287 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_write_done()
305 svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma) in svc_rdma_read_info_alloc() argument
310 ibdev_to_node(rdma->sc_cm_id->device)); in svc_rdma_read_info_alloc()
314 svc_rdma_cc_init(rdma, &info->ri_cc); in svc_rdma_read_info_alloc()
326 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
365 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_post_chunk_ctxt() local
374 if (cc->cc_sqecount > rdma->sc_sq_depth) in svc_rdma_post_chunk_ctxt()
383 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, in svc_rdma_post_chunk_ctxt()
384 rdma->sc_port_num, cqe, first_wr); in svc_rdma_post_chunk_ctxt()
390 &rdma->sc_sq_avail) > 0) { in svc_rdma_post_chunk_ctxt()
392 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); in svc_rdma_post_chunk_ctxt()
399 trace_svcrdma_sq_full(rdma); in svc_rdma_post_chunk_ctxt()
400 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
401 wait_event(rdma->sc_send_wait, in svc_rdma_post_chunk_ctxt()
402 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); in svc_rdma_post_chunk_ctxt()
403 trace_svcrdma_sq_retry(rdma); in svc_rdma_post_chunk_ctxt()
406 trace_svcrdma_sq_post_err(rdma, ret); in svc_rdma_post_chunk_ctxt()
407 svc_xprt_deferred_close(&rdma->sc_xprt); in svc_rdma_post_chunk_ctxt()
413 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); in svc_rdma_post_chunk_ctxt()
414 wake_up(&rdma->sc_send_wait); in svc_rdma_post_chunk_ctxt()
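svc_rdma_post_chunk_ctxt() does the send-queue accounting visible in the lines above: a chain larger than the whole SQ is rejected outright; otherwise its SQE count is reserved by subtracting from sc_sq_avail, and on overcommit the credits are returned, the task sleeps on sc_send_wait until enough entries are free, and the post is retried. If ib_post_send() fails, the credits are returned, waiters are woken, and the transport is scheduled for close. The compilable user-space model below keeps that credit scheme, with a mutex and condition variable standing in for the kernel's atomic counter and wait queue and post_send() as a stub.

#include <pthread.h>

struct sq_accounting {
        pthread_mutex_t lock;
        pthread_cond_t space_freed;
        int sq_avail;           /* free Send Queue entries */
        int sq_depth;           /* total Send Queue entries */
};

static int post_send(void) { return 0; }        /* stub for the verbs post */

static int post_chunk(struct sq_accounting *sq, int sqecount)
{
        int ret;

        if (sqecount > sq->sq_depth)
                return -1;      /* this chain can never fit on the SQ */

        /* The kernel reserves optimistically and backs off on overcommit;
         * with a mutex it is simpler to wait for credits, then reserve. */
        pthread_mutex_lock(&sq->lock);
        while (sq->sq_avail < sqecount)
                pthread_cond_wait(&sq->space_freed, &sq->lock);
        sq->sq_avail -= sqecount;
        pthread_mutex_unlock(&sq->lock);

        ret = post_send();
        if (ret) {
                /* Posting failed: return the credits and wake any waiters. */
                pthread_mutex_lock(&sq->lock);
                sq->sq_avail += sqecount;
                pthread_cond_broadcast(&sq->space_freed);
                pthread_mutex_unlock(&sq->lock);
        }
        return ret;
}

In the kernel, the credits come back on the completion path: svc_rdma_write_done() calls svc_rdma_wake_send_waiters() with cc_sqecount, and the error path shown above adds the count back to sc_sq_avail and wakes sc_send_wait directly, which is what the waiting poster eventually observes.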
465 /* Construct RDMA Write WRs to send a portion of an xdr_buf containing
476 struct svcxprt_rdma *rdma = cc->cc_rdma; in svc_rdma_build_writes() local
492 ctxt = svc_rdma_get_rw_ctxt(rdma, in svc_rdma_build_writes()
499 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle, in svc_rdma_build_writes()
519 trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no, in svc_rdma_build_writes()
525 * svc_rdma_iov_write - Construct RDMA Writes from an iov
533 * %-EIO if an rdma-rw error occurred
544 * svc_rdma_pages_write - Construct RDMA Writes from pages
554 * %-EIO if an rdma-rw error occurred
568 * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
576 * %-EIO if an rdma-rw error occurred
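The three helpers documented above build RDMA Write WRs from, respectively, a single kvec, a run of pages, and a whole xdr_buf, and each reports -EIO when an rdma-rw error occurs. The sketch below shows the xdr_buf walk this implies: head kvec, then the page payload, then the tail kvec. write_range() is a hypothetical stand-in for building the Write WRs over one memory range, and the page list is flattened to a single range for brevity.

#include <stddef.h>

struct kvec { void *iov_base; size_t iov_len; };

/* A reduced model of xdr_buf: head kvec, page payload, tail kvec. */
struct xdr_buf_model {
        struct kvec head;
        void **pages;
        size_t page_len;
        struct kvec tail;
};

static int write_range(const void *base, size_t len)
{
        (void)base; (void)len;
        return 0;               /* 0 on success, -EIO on an rdma-rw error */
}

static int xb_write(const struct xdr_buf_model *xdr)
{
        int ret = 0;

        if (xdr->head.iov_len)
                ret = write_range(xdr->head.iov_base, xdr->head.iov_len);
        if (!ret && xdr->page_len)
                ret = write_range(xdr->pages[0], xdr->page_len);  /* flattened */
        if (!ret && xdr->tail.iov_len)
                ret = write_range(xdr->tail.iov_base, xdr->tail.iov_len);
        return ret;
}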
607 * @rdma: controlling RDMA transport
618 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_write_chunk() argument
626 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_write_chunk()
648 * @rdma: controlling RDMA transport
659 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, in svc_rdma_send_reply_chunk() argument
672 info = svc_rdma_write_info_alloc(rdma, chunk); in svc_rdma_send_reply_chunk()
695 * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
764 * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
792 * @info: context for RDMA Reads
798 * info->ri_pageno and info->ri_pageoff so that the next RDMA Read
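One fragment above notes that info->ri_pageno and info->ri_pageoff are updated so that the next RDMA Read lands contiguously after the bytes already placed. The arithmetic for advancing such a page/offset cursor is simple; the sketch below assumes a 4 KiB page size and mirrors only the two ri_* names from the comment.

#include <stddef.h>

#define MODEL_PAGE_SIZE 4096u   /* assumption for this sketch */

struct read_cursor {
        unsigned int ri_pageno;         /* index of the current receive page */
        unsigned int ri_pageoff;        /* byte offset within that page */
};

/* Move the cursor past 'len' bytes that were just placed. */
static void advance_cursor(struct read_cursor *cur, size_t len)
{
        size_t off = cur->ri_pageoff + len;

        cur->ri_pageno += off / MODEL_PAGE_SIZE;
        cur->ri_pageoff = off % MODEL_PAGE_SIZE;
}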
841 * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
842 * @info: context for RDMA Reads
848 * %0: RDMA Read WQEs were successfully built
903 * svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
904 * @info: context for RDMA Reads
913 * %0: RDMA Read WQEs were successfully built
961 * svc_rdma_read_chunk_range - Build RDMA Read WQEs for portion of a chunk
962 * @info: context for RDMA Reads
968 * %0: RDMA Read WQEs were successfully built
1006 * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
1007 * @info: context for RDMA Reads
1010 * %0: RDMA Read WQEs were successfully built
1059 * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
1060 * @info: context for RDMA Reads
1070 * %0: RDMA Read WQEs were successfully built
1099 * @rdma: controlling RDMA transport
1103 * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
1111 * RDMA Reads have completed.
1114 * %1: all needed RDMA Reads were posted successfully,
1120 int svc_rdma_process_read_list(struct svcxprt_rdma *rdma, in svc_rdma_process_read_list() argument
1128 info = svc_rdma_read_info_alloc(rdma); in svc_rdma_process_read_list()
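svc_rdma_process_read_list() embodies the rule quoted in its kernel-doc above: the XDR decoders expect the whole RPC Call to be present before decoding starts, so the server posts every RDMA Read named by the Read list first and defers the request until those Reads complete. A very rough sketch of that control flow follows; every helper and return value here is an illustrative stub, except that returning 1 mirrors the documented "%1: all needed RDMA Reads were posted successfully".

static int request_has_read_list(void) { return 1; }    /* stub */
static int post_read_chunks(void)      { return 0; }    /* stub */

static int process_read_list(void)
{
        if (!request_has_read_list())
                return 0;       /* nothing to pull; decode immediately */

        if (post_read_chunks() < 0)
                return -1;      /* posting failed; close the transport */

        /* Reads are in flight: the request is deferred and re-enqueued
         * for XDR decoding only after the Read completions arrive. */
        return 1;
}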