1f13193f5SChuck Lever /* 2f13193f5SChuck Lever * Copyright (c) 2016 Oracle. All rights reserved. 3f13193f5SChuck Lever * 4f13193f5SChuck Lever * Use the core R/W API to move RPC-over-RDMA Read and Write chunks. 5f13193f5SChuck Lever */ 6f13193f5SChuck Lever 7f13193f5SChuck Lever #include <linux/sunrpc/rpc_rdma.h> 8f13193f5SChuck Lever #include <linux/sunrpc/svc_rdma.h> 9f13193f5SChuck Lever #include <linux/sunrpc/debug.h> 10f13193f5SChuck Lever 11f13193f5SChuck Lever #include <rdma/rw.h> 12f13193f5SChuck Lever 13f13193f5SChuck Lever #define RPCDBG_FACILITY RPCDBG_SVCXPRT 14f13193f5SChuck Lever 15f13193f5SChuck Lever /* Each R/W context contains state for one chain of RDMA Read or 16f13193f5SChuck Lever * Write Work Requests. 17f13193f5SChuck Lever * 18f13193f5SChuck Lever * Each WR chain handles a single contiguous server-side buffer, 19f13193f5SChuck Lever * because scatterlist entries after the first have to start on 20f13193f5SChuck Lever * page alignment. xdr_buf iovecs cannot guarantee alignment. 21f13193f5SChuck Lever * 22f13193f5SChuck Lever * Each WR chain handles only one R_key. Each RPC-over-RDMA segment 23f13193f5SChuck Lever * from a client may contain a unique R_key, so each WR chain moves 24f13193f5SChuck Lever * up to one segment at a time. 25f13193f5SChuck Lever * 26f13193f5SChuck Lever * The scatterlist makes this data structure over 4KB in size. To 27f13193f5SChuck Lever * make it less likely to fail, and to handle the allocation for 28f13193f5SChuck Lever * smaller I/O requests without disabling bottom-halves, these 29f13193f5SChuck Lever * contexts are created on demand, but cached and reused until the 30f13193f5SChuck Lever * controlling svcxprt_rdma is destroyed. 31f13193f5SChuck Lever */ 32f13193f5SChuck Lever struct svc_rdma_rw_ctxt { 33f13193f5SChuck Lever struct list_head rw_list; 34f13193f5SChuck Lever struct rdma_rw_ctx rw_ctx; 35f13193f5SChuck Lever int rw_nents; 36f13193f5SChuck Lever struct sg_table rw_sg_table; 37f13193f5SChuck Lever struct scatterlist rw_first_sgl[0]; 38f13193f5SChuck Lever }; 39f13193f5SChuck Lever 40f13193f5SChuck Lever static inline struct svc_rdma_rw_ctxt * 41f13193f5SChuck Lever svc_rdma_next_ctxt(struct list_head *list) 42f13193f5SChuck Lever { 43f13193f5SChuck Lever return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt, 44f13193f5SChuck Lever rw_list); 45f13193f5SChuck Lever } 46f13193f5SChuck Lever 47f13193f5SChuck Lever static struct svc_rdma_rw_ctxt * 48f13193f5SChuck Lever svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) 49f13193f5SChuck Lever { 50f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt; 51f13193f5SChuck Lever 52f13193f5SChuck Lever spin_lock(&rdma->sc_rw_ctxt_lock); 53f13193f5SChuck Lever 54f13193f5SChuck Lever ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts); 55f13193f5SChuck Lever if (ctxt) { 56f13193f5SChuck Lever list_del(&ctxt->rw_list); 57f13193f5SChuck Lever spin_unlock(&rdma->sc_rw_ctxt_lock); 58f13193f5SChuck Lever } else { 59f13193f5SChuck Lever spin_unlock(&rdma->sc_rw_ctxt_lock); 60f13193f5SChuck Lever ctxt = kmalloc(sizeof(*ctxt) + 61f13193f5SChuck Lever SG_CHUNK_SIZE * sizeof(struct scatterlist), 62f13193f5SChuck Lever GFP_KERNEL); 63f13193f5SChuck Lever if (!ctxt) 64f13193f5SChuck Lever goto out; 65f13193f5SChuck Lever INIT_LIST_HEAD(&ctxt->rw_list); 66f13193f5SChuck Lever } 67f13193f5SChuck Lever 68f13193f5SChuck Lever ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; 69f13193f5SChuck Lever if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, 70f13193f5SChuck Lever ctxt->rw_sg_table.sgl)) { 71f13193f5SChuck Lever kfree(ctxt); 72f13193f5SChuck Lever ctxt = NULL; 73f13193f5SChuck Lever } 74f13193f5SChuck Lever out: 75f13193f5SChuck Lever return ctxt; 76f13193f5SChuck Lever } 77f13193f5SChuck Lever 78f13193f5SChuck Lever static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, 79f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt) 80f13193f5SChuck Lever { 81f13193f5SChuck Lever sg_free_table_chained(&ctxt->rw_sg_table, true); 82f13193f5SChuck Lever 83f13193f5SChuck Lever spin_lock(&rdma->sc_rw_ctxt_lock); 84f13193f5SChuck Lever list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts); 85f13193f5SChuck Lever spin_unlock(&rdma->sc_rw_ctxt_lock); 86f13193f5SChuck Lever } 87f13193f5SChuck Lever 88f13193f5SChuck Lever /** 89f13193f5SChuck Lever * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts 90f13193f5SChuck Lever * @rdma: transport about to be destroyed 91f13193f5SChuck Lever * 92f13193f5SChuck Lever */ 93f13193f5SChuck Lever void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) 94f13193f5SChuck Lever { 95f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt; 96f13193f5SChuck Lever 97f13193f5SChuck Lever while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) { 98f13193f5SChuck Lever list_del(&ctxt->rw_list); 99f13193f5SChuck Lever kfree(ctxt); 100f13193f5SChuck Lever } 101f13193f5SChuck Lever } 102f13193f5SChuck Lever 103f13193f5SChuck Lever /* A chunk context tracks all I/O for moving one Read or Write 104f13193f5SChuck Lever * chunk. This is a a set of rdma_rw's that handle data movement 105f13193f5SChuck Lever * for all segments of one chunk. 106f13193f5SChuck Lever * 107f13193f5SChuck Lever * These are small, acquired with a single allocator call, and 108f13193f5SChuck Lever * no more than one is needed per chunk. They are allocated on 109f13193f5SChuck Lever * demand, and not cached. 110f13193f5SChuck Lever */ 111f13193f5SChuck Lever struct svc_rdma_chunk_ctxt { 112f13193f5SChuck Lever struct ib_cqe cc_cqe; 113f13193f5SChuck Lever struct svcxprt_rdma *cc_rdma; 114f13193f5SChuck Lever struct list_head cc_rwctxts; 115f13193f5SChuck Lever int cc_sqecount; 116f13193f5SChuck Lever enum dma_data_direction cc_dir; 117f13193f5SChuck Lever }; 118f13193f5SChuck Lever 119f13193f5SChuck Lever static void svc_rdma_cc_init(struct svcxprt_rdma *rdma, 120f13193f5SChuck Lever struct svc_rdma_chunk_ctxt *cc, 121f13193f5SChuck Lever enum dma_data_direction dir) 122f13193f5SChuck Lever { 123f13193f5SChuck Lever cc->cc_rdma = rdma; 124f13193f5SChuck Lever svc_xprt_get(&rdma->sc_xprt); 125f13193f5SChuck Lever 126f13193f5SChuck Lever INIT_LIST_HEAD(&cc->cc_rwctxts); 127f13193f5SChuck Lever cc->cc_sqecount = 0; 128f13193f5SChuck Lever cc->cc_dir = dir; 129f13193f5SChuck Lever } 130f13193f5SChuck Lever 131f13193f5SChuck Lever static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc) 132f13193f5SChuck Lever { 133f13193f5SChuck Lever struct svcxprt_rdma *rdma = cc->cc_rdma; 134f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt; 135f13193f5SChuck Lever 136f13193f5SChuck Lever while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) { 137f13193f5SChuck Lever list_del(&ctxt->rw_list); 138f13193f5SChuck Lever 139f13193f5SChuck Lever rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, 140f13193f5SChuck Lever rdma->sc_port_num, ctxt->rw_sg_table.sgl, 141f13193f5SChuck Lever ctxt->rw_nents, cc->cc_dir); 142f13193f5SChuck Lever svc_rdma_put_rw_ctxt(rdma, ctxt); 143f13193f5SChuck Lever } 144f13193f5SChuck Lever svc_xprt_put(&rdma->sc_xprt); 145f13193f5SChuck Lever } 146f13193f5SChuck Lever 147f13193f5SChuck Lever /* State for sending a Write or Reply chunk. 148f13193f5SChuck Lever * - Tracks progress of writing one chunk over all its segments 149f13193f5SChuck Lever * - Stores arguments for the SGL constructor functions 150f13193f5SChuck Lever */ 151f13193f5SChuck Lever struct svc_rdma_write_info { 152f13193f5SChuck Lever /* write state of this chunk */ 153f13193f5SChuck Lever unsigned int wi_seg_off; 154f13193f5SChuck Lever unsigned int wi_seg_no; 155f13193f5SChuck Lever unsigned int wi_nsegs; 156f13193f5SChuck Lever __be32 *wi_segs; 157f13193f5SChuck Lever 158f13193f5SChuck Lever /* SGL constructor arguments */ 159f13193f5SChuck Lever struct xdr_buf *wi_xdr; 160f13193f5SChuck Lever unsigned char *wi_base; 161f13193f5SChuck Lever unsigned int wi_next_off; 162f13193f5SChuck Lever 163f13193f5SChuck Lever struct svc_rdma_chunk_ctxt wi_cc; 164f13193f5SChuck Lever }; 165f13193f5SChuck Lever 166f13193f5SChuck Lever static struct svc_rdma_write_info * 167f13193f5SChuck Lever svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk) 168f13193f5SChuck Lever { 169f13193f5SChuck Lever struct svc_rdma_write_info *info; 170f13193f5SChuck Lever 171f13193f5SChuck Lever info = kmalloc(sizeof(*info), GFP_KERNEL); 172f13193f5SChuck Lever if (!info) 173f13193f5SChuck Lever return info; 174f13193f5SChuck Lever 175f13193f5SChuck Lever info->wi_seg_off = 0; 176f13193f5SChuck Lever info->wi_seg_no = 0; 177f13193f5SChuck Lever info->wi_nsegs = be32_to_cpup(++chunk); 178f13193f5SChuck Lever info->wi_segs = ++chunk; 179f13193f5SChuck Lever svc_rdma_cc_init(rdma, &info->wi_cc, DMA_TO_DEVICE); 180f13193f5SChuck Lever return info; 181f13193f5SChuck Lever } 182f13193f5SChuck Lever 183f13193f5SChuck Lever static void svc_rdma_write_info_free(struct svc_rdma_write_info *info) 184f13193f5SChuck Lever { 185f13193f5SChuck Lever svc_rdma_cc_release(&info->wi_cc); 186f13193f5SChuck Lever kfree(info); 187f13193f5SChuck Lever } 188f13193f5SChuck Lever 189f13193f5SChuck Lever /** 190f13193f5SChuck Lever * svc_rdma_write_done - Write chunk completion 191f13193f5SChuck Lever * @cq: controlling Completion Queue 192f13193f5SChuck Lever * @wc: Work Completion 193f13193f5SChuck Lever * 194f13193f5SChuck Lever * Pages under I/O are freed by a subsequent Send completion. 195f13193f5SChuck Lever */ 196f13193f5SChuck Lever static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) 197f13193f5SChuck Lever { 198f13193f5SChuck Lever struct ib_cqe *cqe = wc->wr_cqe; 199f13193f5SChuck Lever struct svc_rdma_chunk_ctxt *cc = 200f13193f5SChuck Lever container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe); 201f13193f5SChuck Lever struct svcxprt_rdma *rdma = cc->cc_rdma; 202f13193f5SChuck Lever struct svc_rdma_write_info *info = 203f13193f5SChuck Lever container_of(cc, struct svc_rdma_write_info, wi_cc); 204f13193f5SChuck Lever 205f13193f5SChuck Lever atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); 206f13193f5SChuck Lever wake_up(&rdma->sc_send_wait); 207f13193f5SChuck Lever 208f13193f5SChuck Lever if (unlikely(wc->status != IB_WC_SUCCESS)) { 209f13193f5SChuck Lever set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 210f13193f5SChuck Lever if (wc->status != IB_WC_WR_FLUSH_ERR) 211f13193f5SChuck Lever pr_err("svcrdma: write ctx: %s (%u/0x%x)\n", 212f13193f5SChuck Lever ib_wc_status_msg(wc->status), 213f13193f5SChuck Lever wc->status, wc->vendor_err); 214f13193f5SChuck Lever } 215f13193f5SChuck Lever 216f13193f5SChuck Lever svc_rdma_write_info_free(info); 217f13193f5SChuck Lever } 218f13193f5SChuck Lever 219f13193f5SChuck Lever /* This function sleeps when the transport's Send Queue is congested. 220f13193f5SChuck Lever * 221f13193f5SChuck Lever * Assumptions: 222f13193f5SChuck Lever * - If ib_post_send() succeeds, only one completion is expected, 223f13193f5SChuck Lever * even if one or more WRs are flushed. This is true when posting 224f13193f5SChuck Lever * an rdma_rw_ctx or when posting a single signaled WR. 225f13193f5SChuck Lever */ 226f13193f5SChuck Lever static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) 227f13193f5SChuck Lever { 228f13193f5SChuck Lever struct svcxprt_rdma *rdma = cc->cc_rdma; 229f13193f5SChuck Lever struct svc_xprt *xprt = &rdma->sc_xprt; 230f13193f5SChuck Lever struct ib_send_wr *first_wr, *bad_wr; 231f13193f5SChuck Lever struct list_head *tmp; 232f13193f5SChuck Lever struct ib_cqe *cqe; 233f13193f5SChuck Lever int ret; 234f13193f5SChuck Lever 235*107c1d0aSChuck Lever if (cc->cc_sqecount > rdma->sc_sq_depth) 236*107c1d0aSChuck Lever return -EINVAL; 237*107c1d0aSChuck Lever 238f13193f5SChuck Lever first_wr = NULL; 239f13193f5SChuck Lever cqe = &cc->cc_cqe; 240f13193f5SChuck Lever list_for_each(tmp, &cc->cc_rwctxts) { 241f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt; 242f13193f5SChuck Lever 243f13193f5SChuck Lever ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list); 244f13193f5SChuck Lever first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp, 245f13193f5SChuck Lever rdma->sc_port_num, cqe, first_wr); 246f13193f5SChuck Lever cqe = NULL; 247f13193f5SChuck Lever } 248f13193f5SChuck Lever 249f13193f5SChuck Lever do { 250f13193f5SChuck Lever if (atomic_sub_return(cc->cc_sqecount, 251f13193f5SChuck Lever &rdma->sc_sq_avail) > 0) { 252f13193f5SChuck Lever ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); 253f13193f5SChuck Lever if (ret) 254f13193f5SChuck Lever break; 255f13193f5SChuck Lever return 0; 256f13193f5SChuck Lever } 257f13193f5SChuck Lever 258f13193f5SChuck Lever atomic_inc(&rdma_stat_sq_starve); 259f13193f5SChuck Lever atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); 260f13193f5SChuck Lever wait_event(rdma->sc_send_wait, 261f13193f5SChuck Lever atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount); 262f13193f5SChuck Lever } while (1); 263f13193f5SChuck Lever 264f13193f5SChuck Lever pr_err("svcrdma: ib_post_send failed (%d)\n", ret); 265f13193f5SChuck Lever set_bit(XPT_CLOSE, &xprt->xpt_flags); 266f13193f5SChuck Lever 267f13193f5SChuck Lever /* If even one was posted, there will be a completion. */ 268f13193f5SChuck Lever if (bad_wr != first_wr) 269f13193f5SChuck Lever return 0; 270f13193f5SChuck Lever 271f13193f5SChuck Lever atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail); 272f13193f5SChuck Lever wake_up(&rdma->sc_send_wait); 273f13193f5SChuck Lever return -ENOTCONN; 274f13193f5SChuck Lever } 275f13193f5SChuck Lever 276f13193f5SChuck Lever /* Build and DMA-map an SGL that covers one kvec in an xdr_buf 277f13193f5SChuck Lever */ 278f13193f5SChuck Lever static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info, 279f13193f5SChuck Lever unsigned int len, 280f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt) 281f13193f5SChuck Lever { 282f13193f5SChuck Lever struct scatterlist *sg = ctxt->rw_sg_table.sgl; 283f13193f5SChuck Lever 284f13193f5SChuck Lever sg_set_buf(&sg[0], info->wi_base, len); 285f13193f5SChuck Lever info->wi_base += len; 286f13193f5SChuck Lever 287f13193f5SChuck Lever ctxt->rw_nents = 1; 288f13193f5SChuck Lever } 289f13193f5SChuck Lever 290f13193f5SChuck Lever /* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist. 291f13193f5SChuck Lever */ 292f13193f5SChuck Lever static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info, 293f13193f5SChuck Lever unsigned int remaining, 294f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt) 295f13193f5SChuck Lever { 296f13193f5SChuck Lever unsigned int sge_no, sge_bytes, page_off, page_no; 297f13193f5SChuck Lever struct xdr_buf *xdr = info->wi_xdr; 298f13193f5SChuck Lever struct scatterlist *sg; 299f13193f5SChuck Lever struct page **page; 300f13193f5SChuck Lever 301f13193f5SChuck Lever page_off = (info->wi_next_off + xdr->page_base) & ~PAGE_MASK; 302f13193f5SChuck Lever page_no = (info->wi_next_off + xdr->page_base) >> PAGE_SHIFT; 303f13193f5SChuck Lever page = xdr->pages + page_no; 304f13193f5SChuck Lever info->wi_next_off += remaining; 305f13193f5SChuck Lever sg = ctxt->rw_sg_table.sgl; 306f13193f5SChuck Lever sge_no = 0; 307f13193f5SChuck Lever do { 308f13193f5SChuck Lever sge_bytes = min_t(unsigned int, remaining, 309f13193f5SChuck Lever PAGE_SIZE - page_off); 310f13193f5SChuck Lever sg_set_page(sg, *page, sge_bytes, page_off); 311f13193f5SChuck Lever 312f13193f5SChuck Lever remaining -= sge_bytes; 313f13193f5SChuck Lever sg = sg_next(sg); 314f13193f5SChuck Lever page_off = 0; 315f13193f5SChuck Lever sge_no++; 316f13193f5SChuck Lever page++; 317f13193f5SChuck Lever } while (remaining); 318f13193f5SChuck Lever 319f13193f5SChuck Lever ctxt->rw_nents = sge_no; 320f13193f5SChuck Lever } 321f13193f5SChuck Lever 322f13193f5SChuck Lever /* Construct RDMA Write WRs to send a portion of an xdr_buf containing 323f13193f5SChuck Lever * an RPC Reply. 324f13193f5SChuck Lever */ 325f13193f5SChuck Lever static int 326f13193f5SChuck Lever svc_rdma_build_writes(struct svc_rdma_write_info *info, 327f13193f5SChuck Lever void (*constructor)(struct svc_rdma_write_info *info, 328f13193f5SChuck Lever unsigned int len, 329f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt), 330f13193f5SChuck Lever unsigned int remaining) 331f13193f5SChuck Lever { 332f13193f5SChuck Lever struct svc_rdma_chunk_ctxt *cc = &info->wi_cc; 333f13193f5SChuck Lever struct svcxprt_rdma *rdma = cc->cc_rdma; 334f13193f5SChuck Lever struct svc_rdma_rw_ctxt *ctxt; 335f13193f5SChuck Lever __be32 *seg; 336f13193f5SChuck Lever int ret; 337f13193f5SChuck Lever 338f13193f5SChuck Lever cc->cc_cqe.done = svc_rdma_write_done; 339f13193f5SChuck Lever seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz; 340f13193f5SChuck Lever do { 341f13193f5SChuck Lever unsigned int write_len; 342f13193f5SChuck Lever u32 seg_length, seg_handle; 343f13193f5SChuck Lever u64 seg_offset; 344f13193f5SChuck Lever 345f13193f5SChuck Lever if (info->wi_seg_no >= info->wi_nsegs) 346f13193f5SChuck Lever goto out_overflow; 347f13193f5SChuck Lever 348f13193f5SChuck Lever seg_handle = be32_to_cpup(seg); 349f13193f5SChuck Lever seg_length = be32_to_cpup(seg + 1); 350f13193f5SChuck Lever xdr_decode_hyper(seg + 2, &seg_offset); 351f13193f5SChuck Lever seg_offset += info->wi_seg_off; 352f13193f5SChuck Lever 353f13193f5SChuck Lever write_len = min(remaining, seg_length - info->wi_seg_off); 354f13193f5SChuck Lever ctxt = svc_rdma_get_rw_ctxt(rdma, 355f13193f5SChuck Lever (write_len >> PAGE_SHIFT) + 2); 356f13193f5SChuck Lever if (!ctxt) 357f13193f5SChuck Lever goto out_noctx; 358f13193f5SChuck Lever 359f13193f5SChuck Lever constructor(info, write_len, ctxt); 360f13193f5SChuck Lever ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, 361f13193f5SChuck Lever rdma->sc_port_num, ctxt->rw_sg_table.sgl, 362f13193f5SChuck Lever ctxt->rw_nents, 0, seg_offset, 363f13193f5SChuck Lever seg_handle, DMA_TO_DEVICE); 364f13193f5SChuck Lever if (ret < 0) 365f13193f5SChuck Lever goto out_initerr; 366f13193f5SChuck Lever 367f13193f5SChuck Lever list_add(&ctxt->rw_list, &cc->cc_rwctxts); 368f13193f5SChuck Lever cc->cc_sqecount += ret; 369f13193f5SChuck Lever if (write_len == seg_length - info->wi_seg_off) { 370f13193f5SChuck Lever seg += 4; 371f13193f5SChuck Lever info->wi_seg_no++; 372f13193f5SChuck Lever info->wi_seg_off = 0; 373f13193f5SChuck Lever } else { 374f13193f5SChuck Lever info->wi_seg_off += write_len; 375f13193f5SChuck Lever } 376f13193f5SChuck Lever remaining -= write_len; 377f13193f5SChuck Lever } while (remaining); 378f13193f5SChuck Lever 379f13193f5SChuck Lever return 0; 380f13193f5SChuck Lever 381f13193f5SChuck Lever out_overflow: 382f13193f5SChuck Lever dprintk("svcrdma: inadequate space in Write chunk (%u)\n", 383f13193f5SChuck Lever info->wi_nsegs); 384f13193f5SChuck Lever return -E2BIG; 385f13193f5SChuck Lever 386f13193f5SChuck Lever out_noctx: 387f13193f5SChuck Lever dprintk("svcrdma: no R/W ctxs available\n"); 388f13193f5SChuck Lever return -ENOMEM; 389f13193f5SChuck Lever 390f13193f5SChuck Lever out_initerr: 391f13193f5SChuck Lever svc_rdma_put_rw_ctxt(rdma, ctxt); 392f13193f5SChuck Lever pr_err("svcrdma: failed to map pagelist (%d)\n", ret); 393f13193f5SChuck Lever return -EIO; 394f13193f5SChuck Lever } 395f13193f5SChuck Lever 396f13193f5SChuck Lever /* Send one of an xdr_buf's kvecs by itself. To send a Reply 397f13193f5SChuck Lever * chunk, the whole RPC Reply is written back to the client. 398f13193f5SChuck Lever * This function writes either the head or tail of the xdr_buf 399f13193f5SChuck Lever * containing the Reply. 400f13193f5SChuck Lever */ 401f13193f5SChuck Lever static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info, 402f13193f5SChuck Lever struct kvec *vec) 403f13193f5SChuck Lever { 404f13193f5SChuck Lever info->wi_base = vec->iov_base; 405f13193f5SChuck Lever return svc_rdma_build_writes(info, svc_rdma_vec_to_sg, 406f13193f5SChuck Lever vec->iov_len); 407f13193f5SChuck Lever } 408f13193f5SChuck Lever 409f13193f5SChuck Lever /* Send an xdr_buf's page list by itself. A Write chunk is 410f13193f5SChuck Lever * just the page list. a Reply chunk is the head, page list, 411f13193f5SChuck Lever * and tail. This function is shared between the two types 412f13193f5SChuck Lever * of chunk. 413f13193f5SChuck Lever */ 414f13193f5SChuck Lever static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info, 415f13193f5SChuck Lever struct xdr_buf *xdr) 416f13193f5SChuck Lever { 417f13193f5SChuck Lever info->wi_xdr = xdr; 418f13193f5SChuck Lever info->wi_next_off = 0; 419f13193f5SChuck Lever return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg, 420f13193f5SChuck Lever xdr->page_len); 421f13193f5SChuck Lever } 422f13193f5SChuck Lever 423f13193f5SChuck Lever /** 424f13193f5SChuck Lever * svc_rdma_send_write_chunk - Write all segments in a Write chunk 425f13193f5SChuck Lever * @rdma: controlling RDMA transport 426f13193f5SChuck Lever * @wr_ch: Write chunk provided by client 427f13193f5SChuck Lever * @xdr: xdr_buf containing the data payload 428f13193f5SChuck Lever * 429f13193f5SChuck Lever * Returns a non-negative number of bytes the chunk consumed, or 430f13193f5SChuck Lever * %-E2BIG if the payload was larger than the Write chunk, 431*107c1d0aSChuck Lever * %-EINVAL if client provided too many segments, 432f13193f5SChuck Lever * %-ENOMEM if rdma_rw context pool was exhausted, 433f13193f5SChuck Lever * %-ENOTCONN if posting failed (connection is lost), 434f13193f5SChuck Lever * %-EIO if rdma_rw initialization failed (DMA mapping, etc). 435f13193f5SChuck Lever */ 436f13193f5SChuck Lever int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch, 437f13193f5SChuck Lever struct xdr_buf *xdr) 438f13193f5SChuck Lever { 439f13193f5SChuck Lever struct svc_rdma_write_info *info; 440f13193f5SChuck Lever int ret; 441f13193f5SChuck Lever 442f13193f5SChuck Lever if (!xdr->page_len) 443f13193f5SChuck Lever return 0; 444f13193f5SChuck Lever 445f13193f5SChuck Lever info = svc_rdma_write_info_alloc(rdma, wr_ch); 446f13193f5SChuck Lever if (!info) 447f13193f5SChuck Lever return -ENOMEM; 448f13193f5SChuck Lever 449f13193f5SChuck Lever ret = svc_rdma_send_xdr_pagelist(info, xdr); 450f13193f5SChuck Lever if (ret < 0) 451f13193f5SChuck Lever goto out_err; 452f13193f5SChuck Lever 453f13193f5SChuck Lever ret = svc_rdma_post_chunk_ctxt(&info->wi_cc); 454f13193f5SChuck Lever if (ret < 0) 455f13193f5SChuck Lever goto out_err; 456f13193f5SChuck Lever return xdr->page_len; 457f13193f5SChuck Lever 458f13193f5SChuck Lever out_err: 459f13193f5SChuck Lever svc_rdma_write_info_free(info); 460f13193f5SChuck Lever return ret; 461f13193f5SChuck Lever } 462f13193f5SChuck Lever 463f13193f5SChuck Lever /** 464f13193f5SChuck Lever * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk 465f13193f5SChuck Lever * @rdma: controlling RDMA transport 466f13193f5SChuck Lever * @rp_ch: Reply chunk provided by client 467f13193f5SChuck Lever * @writelist: true if client provided a Write list 468f13193f5SChuck Lever * @xdr: xdr_buf containing an RPC Reply 469f13193f5SChuck Lever * 470f13193f5SChuck Lever * Returns a non-negative number of bytes the chunk consumed, or 471f13193f5SChuck Lever * %-E2BIG if the payload was larger than the Reply chunk, 472*107c1d0aSChuck Lever * %-EINVAL if client provided too many segments, 473f13193f5SChuck Lever * %-ENOMEM if rdma_rw context pool was exhausted, 474f13193f5SChuck Lever * %-ENOTCONN if posting failed (connection is lost), 475f13193f5SChuck Lever * %-EIO if rdma_rw initialization failed (DMA mapping, etc). 476f13193f5SChuck Lever */ 477f13193f5SChuck Lever int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch, 478f13193f5SChuck Lever bool writelist, struct xdr_buf *xdr) 479f13193f5SChuck Lever { 480f13193f5SChuck Lever struct svc_rdma_write_info *info; 481f13193f5SChuck Lever int consumed, ret; 482f13193f5SChuck Lever 483f13193f5SChuck Lever info = svc_rdma_write_info_alloc(rdma, rp_ch); 484f13193f5SChuck Lever if (!info) 485f13193f5SChuck Lever return -ENOMEM; 486f13193f5SChuck Lever 487f13193f5SChuck Lever ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]); 488f13193f5SChuck Lever if (ret < 0) 489f13193f5SChuck Lever goto out_err; 490f13193f5SChuck Lever consumed = xdr->head[0].iov_len; 491f13193f5SChuck Lever 492f13193f5SChuck Lever /* Send the page list in the Reply chunk only if the 493f13193f5SChuck Lever * client did not provide Write chunks. 494f13193f5SChuck Lever */ 495f13193f5SChuck Lever if (!writelist && xdr->page_len) { 496f13193f5SChuck Lever ret = svc_rdma_send_xdr_pagelist(info, xdr); 497f13193f5SChuck Lever if (ret < 0) 498f13193f5SChuck Lever goto out_err; 499f13193f5SChuck Lever consumed += xdr->page_len; 500f13193f5SChuck Lever } 501f13193f5SChuck Lever 502f13193f5SChuck Lever if (xdr->tail[0].iov_len) { 503f13193f5SChuck Lever ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]); 504f13193f5SChuck Lever if (ret < 0) 505f13193f5SChuck Lever goto out_err; 506f13193f5SChuck Lever consumed += xdr->tail[0].iov_len; 507f13193f5SChuck Lever } 508f13193f5SChuck Lever 509f13193f5SChuck Lever ret = svc_rdma_post_chunk_ctxt(&info->wi_cc); 510f13193f5SChuck Lever if (ret < 0) 511f13193f5SChuck Lever goto out_err; 512f13193f5SChuck Lever return consumed; 513f13193f5SChuck Lever 514f13193f5SChuck Lever out_err: 515f13193f5SChuck Lever svc_rdma_write_info_free(info); 516f13193f5SChuck Lever return ret; 517f13193f5SChuck Lever } 518