/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head rw_list;
	struct rdma_rw_ctx rw_ctx;
	int rw_nents;
	struct sg_table rw_sg_table;
	struct scatterlist rw_first_sgl[0];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}
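
/* Worked example of the sizing above (illustration only, assuming
 * SG_CHUNK_SIZE of 128 and a struct scatterlist of roughly 32 bytes
 * on 64-bit builds): the inline rw_first_sgl[] array alone is about
 * 4KB, which is why each context is "over 4KB in size". With 4KB
 * pages, 128 inline entries cover a segment of roughly half a
 * megabyte before sg_alloc_table_chained() has to chain in
 * additional scatterlist chunks.
 */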

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe cc_cqe;
	struct svcxprt_rdma *cc_rdma;
	struct list_head cc_rwctxts;
	int cc_sqecount;
	enum dma_data_direction cc_dir;
};

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc,
			     enum dma_data_direction dir)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
	cc->cc_dir = dir;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, cc->cc_dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}
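
/* Usage note (illustration only): every chunk context follows the
 * same pattern, and the transport reference taken by svc_xprt_get()
 * in svc_rdma_cc_init() is dropped by svc_rdma_cc_release():
 *
 *	svc_rdma_cc_init(rdma, cc, dir);
 *	... add one svc_rdma_rw_ctxt per segment to cc->cc_rwctxts ...
 *	... post them all with svc_rdma_post_chunk_ctxt() ...
 *	svc_rdma_cc_release(cc);   // via *_info_free() at completion
 */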

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int wi_seg_off;
	unsigned int wi_seg_no;
	unsigned int wi_nsegs;
	__be32 *wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf *wi_xdr;
	unsigned char *wi_base;
	unsigned int wi_next_off;

	struct svc_rdma_chunk_ctxt wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc, DMA_TO_DEVICE);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc);
	kfree(info);
}
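
/* XDR layout note (illustration only): @chunk above points at the
 * chunk's list discriminator in the transport header, so the words
 * that svc_rdma_write_info_alloc() steps over are:
 *
 *	chunk[0]: discriminator (non-zero: a chunk is present)
 *	chunk[1]: segment count (becomes wi_nsegs)
 *	chunk[2]: first segment: handle, length, 64-bit offset
 *		  (rpcrdma_segment_maxsz == 4 words per segment,
 *		  decoded later in svc_rdma_build_writes())
 */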

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_op_ctxt *ri_readctxt;
	unsigned int ri_position;
	unsigned int ri_pageno;
	unsigned int ri_pageoff;
	unsigned int ri_chunklen;

	struct svc_rdma_chunk_ctxt ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc, DMA_FROM_DEVICE);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
		svc_rdma_put_context(info->ri_readctxt, 1);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->list,
			      &rdma->sc_read_complete_q);
		spin_unlock(&rdma->sc_rq_dto_lock);

		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}
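
/* SQ accounting example (illustration only): if a chunk needs three
 * WRs, svc_rdma_post_chunk_ctxt() below subtracts 3 from sc_sq_avail
 * before posting. The single completion that arrives in one of the
 * handlers above adds all 3 back and wakes waiters, because only the
 * final WR in the posted chain is signaled and carries cc_cqe.
 */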

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr, *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		atomic_inc(&rdma_stat_sq_starve);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
	} while (1);

	pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = (info->wi_next_off + xdr->page_base) & ~PAGE_MASK;
	page_no = (info->wi_next_off + xdr->page_base) >> PAGE_SHIFT;
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}
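
/* Worked example for svc_rdma_pagelist_to_sg() (illustration only,
 * assuming 4KB pages): writing 10000 bytes starting at page_base 100
 * yields three SGEs of 3996, 4096, and 1908 bytes. Only the first
 * SGE begins at a nonzero page offset, which is why a single WR
 * chain cannot span multiple xdr_buf components (see the comment at
 * the top of this file).
 */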

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}
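
/* Worked example for svc_rdma_build_writes() (illustration only):
 * sending a 12000-byte payload into a chunk with two 8192-byte
 * segments takes two iterations. The first consumes all of segment 0
 * (write_len == 8192, so the loop advances to the next segment); the
 * second maps 3808 bytes of segment 1 and leaves wi_seg_off at 3808,
 * so a later call for the same chunk (e.g. a Reply chunk's tail)
 * resumes mid-segment.
 */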

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
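
/* Hypothetical caller sketch (illustration only; the real caller
 * lives in the svcrdma send path):
 *
 *	ret = svc_rdma_send_write_chunk(rdma, wr_ch, &rqstp->rq_res);
 *	if (ret < 0)
 *		return ret;
 *	// ret is the number of payload bytes moved via RDMA Write;
 *	// the Reply's transport header can now describe the chunk.
 *
 * On success, the write_info is freed later by svc_rdma_write_done();
 * on error it is freed here before returning.
 */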

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		goto out_noctx;
	ctxt->rw_nents = sge_no;

	dprintk("svcrdma: reading segment %u@0x%016llx:0x%08x (%u sges)\n",
		len, offset, rkey, sge_no);

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
			       cc->cc_rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		goto out_initerr;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;

out_initerr:
	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	int ret;

	info->ri_chunklen = 0;
	/* The caller takes ri_position from the first list entry, so
	 * the first iteration always matches and ret is always set.
	 */
	while (*p++ != xdr_zero) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		if (be32_to_cpup(p++) != info->ri_position)
			break;
		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		info->ri_chunklen += rs_length;
	}

	return ret;
}
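
/* XDR layout note (illustration only): each Read list entry that the
 * loop above consumes looks like this on the wire:
 *
 *	1 word:  discriminator (non-zero: an entry follows)
 *	1 word:  Position (byte offset into the RPC message)
 *	1 word:  rs_handle (the segment's R_key)
 *	1 word:  rs_length (segment length in bytes)
 *	2 words: rs_offset (64-bit remote offset)
 *
 * The loop stops at the terminating xdr_zero word, or at the first
 * entry whose Position differs from ri_position.
 */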

/* If there is inline content following the Read chunk, append it to
 * the page list immediately following the data payload. This has to
 * be done after the reader function has determined how many pages
 * were consumed for RDMA Read.
 *
 * On entry, ri_pageno and ri_pageoff point directly to the end of the
 * page list. On exit, both have been updated to the new "next byte".
 *
 * Assumptions:
 *	- Inline content fits entirely in rq_pages[0]
 *	- Trailing content is only a handful of bytes
 */
static int svc_rdma_copy_tail(struct svc_rqst *rqstp,
			      struct svc_rdma_read_info *info)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	unsigned int tail_length, remaining;
	u8 *srcp, *destp;

	/* Assert that all inline content fits in page 0. This is an
	 * implementation limit, not a protocol limit.
	 */
	if (head->arg.head[0].iov_len > PAGE_SIZE) {
		pr_warn_once("svcrdma: too much trailing inline content\n");
		return -EINVAL;
	}

	srcp = head->arg.head[0].iov_base;
	srcp += info->ri_position;
	tail_length = head->arg.head[0].iov_len - info->ri_position;
	remaining = tail_length;

	/* If there is room on the last page in the page list, try to
	 * fit the trailing content there.
	 */
	if (info->ri_pageoff > 0) {
		unsigned int len;

		len = min_t(unsigned int, remaining,
			    PAGE_SIZE - info->ri_pageoff);
		destp = page_address(rqstp->rq_pages[info->ri_pageno]);
		destp += info->ri_pageoff;

		memcpy(destp, srcp, len);
		srcp += len;
		destp += len;
		info->ri_pageoff += len;
		remaining -= len;

		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
	}

	/* Otherwise, a fresh page is needed. */
	if (remaining) {
		head->arg.pages[info->ri_pageno] =
				rqstp->rq_pages[info->ri_pageno];
		head->count++;

		destp = page_address(rqstp->rq_pages[info->ri_pageno]);
		memcpy(destp, srcp, remaining);
		info->ri_pageoff += remaining;
	}

	head->arg.page_len += tail_length;
	head->arg.len += tail_length;
	head->arg.buflen += tail_length;
	return 0;
}
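
/* Worked example for svc_rdma_copy_tail() (illustration only): if
 * the Read chunk sits at Position 96 and the received inline head is
 * 120 bytes, then the 24 bytes that follow the chunk's position are
 * copied to the first free byte after the RDMA Read payload, and
 * page_len, len, and buflen each grow by 24.
 */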

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->arg.pages.
 *
 * Currently NFSD does not look at the head->arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Read chunk at position %u\n",
		info->ri_position);

	info->ri_pageno = head->hdr_count;
	info->ri_pageoff = 0;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	/* Read chunk may need XDR round-up (see RFC 5666, s. 3.7).
	 */
	if (info->ri_chunklen & 3) {
		u32 padlen = 4 - (info->ri_chunklen & 3);

		info->ri_chunklen += padlen;

		/* NB: data payload always starts on XDR alignment,
		 * thus the pad can never contain a page boundary.
		 */
		info->ri_pageoff += padlen;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
	}

	head->arg.page_len = info->ri_chunklen;
	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

	if (info->ri_position < head->arg.head[0].iov_len) {
		ret = svc_rdma_copy_tail(rqstp, info);
		if (ret < 0)
			goto out;
	}
	head->arg.head[0].iov_len = info->ri_position;

out:
	return ret;
}
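
/* Round-up example (illustration only): a 13-byte chunk has
 * (ri_chunklen & 3) == 1, so padlen == 3 and ri_chunklen is rounded
 * up to 16. The pad bytes are claimed from the current sink page but
 * carry no data; XDR decoders ignore round-up padding.
 */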

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Position Zero Read chunk\n");

	info->ri_pageno = head->hdr_count - 1;
	info->ri_pageoff = offset_in_page(head->byte_len);

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

	if (head->arg.len <= head->sge[0].length) {
		/* Transport header and RPC message fit entirely
		 * in page where head iovec resides.
		 */
		head->arg.head[0].iov_len = info->ri_chunklen;
	} else {
		/* Transport header and part of RPC message reside
		 * in the head iovec's page.
		 */
		head->arg.head[0].iov_len =
			head->sge[0].length - head->byte_len;
		head->arg.page_len =
			info->ri_chunklen - head->arg.head[0].iov_len;
	}

out:
	return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 *	- All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_op_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	struct page **page;
	int ret;

	/* The request (with page list) is constructed in
	 * head->arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->hdr_count = head->count;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = head->pages;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);

	/* Mark the start of the pages that can be used for the reply */
	if (info->ri_pageoff > 0)
		info->ri_pageno++;
	rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (ret < 0)
		goto out;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);

out:
	/* Read sink pages have been moved from rqstp->rq_pages to
	 * head->arg.pages. Force svc_recv to refill those slots
	 * in rq_pages.
	 */
	for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
		*page = NULL;

	if (ret < 0)
		svc_rdma_read_info_free(info);
	return ret;
}