// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
        struct list_head        rw_list;
        struct rdma_rw_ctx      rw_ctx;
        unsigned int            rw_nents;
        struct sg_table         rw_sg_table;
        struct scatterlist      rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
                                        rw_list);
}
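
/* Acquire a free R/W context from the transport's cache, or allocate
 * a new one when the cache is empty. @sges is the number of
 * scatterlist entries the context must be able to hold. Returns NULL
 * if no context can be provided.
 */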
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
        struct svc_rdma_rw_ctxt *ctxt;

        spin_lock(&rdma->sc_rw_ctxt_lock);

        ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
        if (ctxt) {
                list_del(&ctxt->rw_list);
                spin_unlock(&rdma->sc_rw_ctxt_lock);
        } else {
                spin_unlock(&rdma->sc_rw_ctxt_lock);
                ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
                               GFP_KERNEL);
                if (!ctxt)
                        goto out_noctx;
                INIT_LIST_HEAD(&ctxt->rw_list);
        }

        ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
                                   ctxt->rw_sg_table.sgl,
                                   SG_CHUNK_SIZE))
                goto out_free;
        return ctxt;

out_free:
        kfree(ctxt);
out_noctx:
        trace_svcrdma_no_rwctx_err(rdma, sges);
        return NULL;
}
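
/* Release a R/W context: free its chained scatterlist, then return
 * the context to the transport's free list for reuse.
 */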
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_rw_ctxt *ctxt)
{
        sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

        spin_lock(&rdma->sc_rw_ctxt_lock);
        list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
        spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
                list_del(&ctxt->rw_list);
                kfree(ctxt);
        }
}

/**
 * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns, on success, the number of WQEs that will be needed
 * on the Send Queue, or a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
                                struct svc_rdma_rw_ctxt *ctxt,
                                u64 offset, u32 handle,
                                enum dma_data_direction direction)
{
        int ret;

        ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
                               ctxt->rw_sg_table.sgl, ctxt->rw_nents,
                               0, offset, handle, direction);
        if (unlikely(ret < 0)) {
                svc_rdma_put_rw_ctxt(rdma, ctxt);
                trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
        }
        return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
        struct ib_cqe           cc_cqe;
        struct svcxprt_rdma     *cc_rdma;
        struct list_head        cc_rwctxts;
        int                     cc_sqecount;
};
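
/* Initialize a chunk context before use: record the owning transport
 * (taking a reference on it), and start with an empty R/W context
 * list and a zero Send Queue entry count.
 */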
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
{
        cc->cc_rdma = rdma;
        svc_xprt_get(&rdma->sc_xprt);

        INIT_LIST_HEAD(&cc->cc_rwctxts);
        cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
                list_del(&ctxt->rw_list);

                rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
                                    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
                                    ctxt->rw_nents, dir);
                svc_rdma_put_rw_ctxt(rdma, ctxt);
        }
        svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
        /* write state of this chunk */
        unsigned int    wi_seg_off;
        unsigned int    wi_seg_no;
        unsigned int    wi_nsegs;
        __be32          *wi_segs;

        /* SGL constructor arguments */
        struct xdr_buf  *wi_xdr;
        unsigned char   *wi_base;
        unsigned int    wi_next_off;

        struct svc_rdma_chunk_ctxt      wi_cc;
};
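
/* Allocate per-chunk write state. @chunk points at the Write or Reply
 * chunk in the received transport header; the chunk's segment count
 * and the start of its segment array are recorded here for use by
 * svc_rdma_build_writes().
 */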
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
        struct svc_rdma_write_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        info->wi_seg_off = 0;
        info->wi_seg_no = 0;
        info->wi_nsegs = be32_to_cpup(++chunk);
        info->wi_segs = ++chunk;
        svc_rdma_cc_init(rdma, &info->wi_cc);
        info->wi_cc.cc_cqe.done = svc_rdma_write_done;
        return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
        svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);

        trace_svcrdma_wc_write(wc);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS))
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

        svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
        struct svc_rdma_recv_ctxt       *ri_readctxt;
        unsigned int                    ri_position;
        unsigned int                    ri_pageno;
        unsigned int                    ri_pageoff;
        unsigned int                    ri_chunklen;

        struct svc_rdma_chunk_ctxt      ri_cc;
};
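
/* Allocate state for pulling one Read chunk. Released by
 * svc_rdma_read_info_free() once the chunk's RDMA Reads have
 * completed, or if posting them fails.
 */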
static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_read_info *info;

        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return info;

        svc_rdma_cc_init(rdma, &info->ri_cc);
        info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
        return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
        svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
        kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);

        trace_svcrdma_wc_read(wc);

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
        } else {
                spin_lock(&rdma->sc_rq_dto_lock);
                list_add_tail(&info->ri_readctxt->rc_list,
                              &rdma->sc_read_complete_q);
                /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
                set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
                spin_unlock(&rdma->sc_rq_dto_lock);

                svc_xprt_enqueue(&rdma->sc_xprt);
        }

        svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_xprt *xprt = &rdma->sc_xprt;
        struct ib_send_wr *first_wr;
        const struct ib_send_wr *bad_wr;
        struct list_head *tmp;
        struct ib_cqe *cqe;
        int ret;

        if (cc->cc_sqecount > rdma->sc_sq_depth)
                return -EINVAL;

        first_wr = NULL;
        cqe = &cc->cc_cqe;
        list_for_each(tmp, &cc->cc_rwctxts) {
                struct svc_rdma_rw_ctxt *ctxt;

                ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
                first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
                                           rdma->sc_port_num, cqe, first_wr);
                cqe = NULL;
        }
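
        /* Reserve cc_sqecount Send Queue entries up front. If that
         * would overdraw the queue, back out the reservation and wait
         * for completions to return enough credits, then retry.
         */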
        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;
                        return 0;
                }

                trace_svcrdma_sq_full(rdma);
                atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
                wait_event(rdma->sc_send_wait,
                           atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
                trace_svcrdma_sq_retry(rdma);
        } while (1);

        trace_svcrdma_sq_post_err(rdma, ret);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);

        /* If even one was posted, there will be a completion. */
        if (bad_wr != first_wr)
                return 0;

        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
        return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
                               unsigned int len,
                               struct svc_rdma_rw_ctxt *ctxt)
{
        struct scatterlist *sg = ctxt->rw_sg_table.sgl;

        sg_set_buf(&sg[0], info->wi_base, len);
        info->wi_base += len;

        ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
                                    unsigned int remaining,
                                    struct svc_rdma_rw_ctxt *ctxt)
{
        unsigned int sge_no, sge_bytes, page_off, page_no;
        struct xdr_buf *xdr = info->wi_xdr;
        struct scatterlist *sg;
        struct page **page;

        page_off = info->wi_next_off + xdr->page_base;
        page_no = page_off >> PAGE_SHIFT;
        page_off = offset_in_page(page_off);
        page = xdr->pages + page_no;
        info->wi_next_off += remaining;
        sg = ctxt->rw_sg_table.sgl;
        sge_no = 0;
        do {
                sge_bytes = min_t(unsigned int, remaining,
                                  PAGE_SIZE - page_off);
                sg_set_page(sg, *page, sge_bytes, page_off);

                remaining -= sge_bytes;
                sg = sg_next(sg);
                page_off = 0;
                sge_no++;
                page++;
        } while (remaining);

        ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
                      void (*constructor)(struct svc_rdma_write_info *info,
                                          unsigned int len,
                                          struct svc_rdma_rw_ctxt *ctxt),
                      unsigned int remaining)
{
        struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
        struct svcxprt_rdma *rdma = cc->cc_rdma;
        struct svc_rdma_rw_ctxt *ctxt;
        __be32 *seg;
        int ret;

        seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
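        /* Each pass handles one Write chunk segment: decode the
         * segment, build an SGL covering up to write_len bytes of the
         * remaining payload, and queue an rdma_rw context for it on
         * the chunk context. Each segment carries its own R_key, so
         * each gets its own WR chain.
         */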
        do {
                unsigned int write_len;
                u32 seg_length, seg_handle;
                u64 seg_offset;

                if (info->wi_seg_no >= info->wi_nsegs)
                        goto out_overflow;

                seg_handle = be32_to_cpup(seg);
                seg_length = be32_to_cpup(seg + 1);
                xdr_decode_hyper(seg + 2, &seg_offset);
                seg_offset += info->wi_seg_off;

                write_len = min(remaining, seg_length - info->wi_seg_off);
                ctxt = svc_rdma_get_rw_ctxt(rdma,
                                            (write_len >> PAGE_SHIFT) + 2);
                if (!ctxt)
                        return -ENOMEM;

                constructor(info, write_len, ctxt);
                ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
                                           DMA_TO_DEVICE);
                if (ret < 0)
                        return -EIO;

                trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);

                list_add(&ctxt->rw_list, &cc->cc_rwctxts);
                cc->cc_sqecount += ret;
                if (write_len == seg_length - info->wi_seg_off) {
                        seg += 4;
                        info->wi_seg_no++;
                        info->wi_seg_off = 0;
                } else {
                        info->wi_seg_off += write_len;
                }
                remaining -= write_len;
        } while (remaining);

        return 0;

out_overflow:
        trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
                                     info->wi_nsegs);
        return -E2BIG;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
                                  struct kvec *vec)
{
        info->wi_base = vec->iov_base;
        return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
                                     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
                                      struct xdr_buf *xdr,
                                      unsigned int offset,
                                      unsigned long length)
{
        info->wi_xdr = xdr;
        info->wi_next_off = offset - xdr->head[0].iov_len;
        return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
                                     length);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *   %-E2BIG if the payload was larger than the Write chunk,
 *   %-EINVAL if client provided too many segments,
 *   %-ENOMEM if rdma_rw context pool was exhausted,
 *   %-ENOTCONN if posting failed (connection is lost),
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
                              struct xdr_buf *xdr,
                              unsigned int offset, unsigned long length)
{
        struct svc_rdma_write_info *info;
        int ret;

        if (!length)
                return 0;

        info = svc_rdma_write_info_alloc(rdma, wr_ch);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_send_write_chunk(xdr->page_len);
        return length;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *   %-E2BIG if the payload was larger than the Reply chunk,
 *   %-EINVAL if client provided too many segments,
 *   %-ENOMEM if rdma_rw context pool was exhausted,
 *   %-ENOTCONN if posting failed (connection is lost),
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
                              const struct svc_rdma_recv_ctxt *rctxt,
                              struct xdr_buf *xdr)
{
        struct svc_rdma_write_info *info;
        int consumed, ret;

        info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
        if (!info)
                return -ENOMEM;

        ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
        if (ret < 0)
                goto out_err;
        consumed = xdr->head[0].iov_len;

        /* Send the page list in the Reply chunk only if the
         * client did not provide Write chunks.
         */
        if (!rctxt->rc_write_list && xdr->page_len) {
                ret = svc_rdma_send_xdr_pagelist(info, xdr,
                                                 xdr->head[0].iov_len,
                                                 xdr->page_len);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->page_len;
        }

        if (xdr->tail[0].iov_len) {
                ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
                if (ret < 0)
                        goto out_err;
                consumed += xdr->tail[0].iov_len;
        }

        ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
        if (ret < 0)
                goto out_err;

        trace_svcrdma_send_reply_chunk(consumed);
        return consumed;

out_err:
        svc_rdma_write_info_free(info);
        return ret;
}
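
/* Build RDMA Read WRs to pull one Read segment (@rkey, @len bytes
 * starting at @offset) into the pages of the Read sink buffer, and
 * queue the resulting rdma_rw context on the chunk context.
 */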
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
                                       struct svc_rqst *rqstp,
                                       u32 rkey, u32 len, u64 offset)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
        struct svc_rdma_rw_ctxt *ctxt;
        unsigned int sge_no, seg_len;
        struct scatterlist *sg;
        int ret;

        sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
        ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
        if (!ctxt)
                return -ENOMEM;
        ctxt->rw_nents = sge_no;
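
        /* Build one sg entry for each page of the segment. The sink
         * pages come from rqstp->rq_pages and are also recorded in
         * head->rc_arg.pages so the pulled data stays attached to the
         * Receive context.
         */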
        sg = ctxt->rw_sg_table.sgl;
        for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
                seg_len = min_t(unsigned int, len,
                                PAGE_SIZE - info->ri_pageoff);

                head->rc_arg.pages[info->ri_pageno] =
                        rqstp->rq_pages[info->ri_pageno];
                if (!info->ri_pageoff)
                        head->rc_page_count++;

                sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
                            seg_len, info->ri_pageoff);
                sg = sg_next(sg);

                info->ri_pageoff += seg_len;
                if (info->ri_pageoff == PAGE_SIZE) {
                        info->ri_pageno++;
                        info->ri_pageoff = 0;
                }
                len -= seg_len;

                /* Safety check */
                if (len &&
                    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
                        goto out_overrun;
        }

        ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
                                   DMA_FROM_DEVICE);
        if (ret < 0)
                return -EIO;

        list_add(&ctxt->rw_list, &cc->cc_rwctxts);
        cc->cc_sqecount += ret;
        return 0;

out_overrun:
        trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
        return -EINVAL;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                     struct svc_rdma_read_info *info,
                                     __be32 *p)
{
        unsigned int i;
        int ret;

        ret = -EINVAL;
        info->ri_chunklen = 0;
        while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
                u32 rs_handle, rs_length;
                u64 rs_offset;

                rs_handle = be32_to_cpup(p++);
                rs_length = be32_to_cpup(p++);
                p = xdr_decode_hyper(p, &rs_offset);

                ret = svc_rdma_build_read_segment(info, rqstp,
                                                  rs_handle, rs_length,
                                                  rs_offset);
                if (ret < 0)
                        break;

                trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset);
                info->ri_chunklen += rs_length;
        }

        /* Pages under I/O have been copied to head->rc_pages.
         * Prevent their premature release by svc_xprt_release().
         */
        for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
                rqstp->rq_pages[i] = NULL;

        return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
                                            struct svc_rdma_read_info *info,
                                            __be32 *p)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        int ret;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

        head->rc_hdr_count = 0;

        /* Split the Receive buffer between the head and tail
         * buffers at the Read chunk's position. XDR roundup of the
         * chunk is not included in either the pagelist or in
         * the tail.
         */
        head->rc_arg.tail[0].iov_base =
                head->rc_arg.head[0].iov_base + info->ri_position;
        head->rc_arg.tail[0].iov_len =
                head->rc_arg.head[0].iov_len - info->ri_position;
        head->rc_arg.head[0].iov_len = info->ri_position;

        /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
         *
         * If the client already rounded up the chunk length, the
         * length does not change. Otherwise, the length of the page
         * list is increased to include XDR round-up.
         *
         * Currently these chunks always start at page offset 0,
         * thus the rounded-up length never crosses a page boundary.
         */
        info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

        head->rc_arg.page_len = info->ri_chunklen;
        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;

out:
        return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *   - A PZRC has an XDR-aligned length (no implicit round-up).
 *   - There can be no trailing inline content (IOW, we assume
 *     a PZRC is never sent in an RDMA_MSG message, though it's
 *     allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
                                        struct svc_rdma_read_info *info,
                                        __be32 *p)
{
        struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
        int ret;

        ret = svc_rdma_build_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out;

        trace_svcrdma_send_pzr(info->ri_chunklen);

        head->rc_arg.len += info->ri_chunklen;
        head->rc_arg.buflen += info->ri_chunklen;

        head->rc_hdr_count = 1;
        head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
        head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
                                             info->ri_chunklen);

        head->rc_arg.page_len = info->ri_chunklen -
                                head->rc_arg.head[0].iov_len;

out:
        return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *   %0 if all needed RDMA Reads were posted successfully,
 *   %-EINVAL if client provided too many segments,
 *   %-ENOMEM if rdma_rw context pool was exhausted,
 *   %-ENOTCONN if posting failed (connection is lost),
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 *   - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
                             struct svc_rdma_recv_ctxt *head, __be32 *p)
{
        struct svc_rdma_read_info *info;
        int ret;

        /* The request (with page list) is constructed in
         * head->rc_arg. Pages involved with RDMA Read I/O are
         * transferred there.
         */
        head->rc_arg.head[0] = rqstp->rq_arg.head[0];
        head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
        head->rc_arg.pages = head->rc_pages;
        head->rc_arg.page_base = 0;
        head->rc_arg.page_len = 0;
        head->rc_arg.len = rqstp->rq_arg.len;
        head->rc_arg.buflen = rqstp->rq_arg.buflen;

        info = svc_rdma_read_info_alloc(rdma);
        if (!info)
                return -ENOMEM;
        info->ri_readctxt = head;
        info->ri_pageno = 0;
        info->ri_pageoff = 0;
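
        /* The Position field of the chunk's first segment determines
         * how the pulled data is reassembled: a non-zero Position
         * means the chunk replaces a portion of the inline message at
         * that XDR offset, while Position Zero means the chunk
         * conveys the entire RPC Call message.
         */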
        info->ri_position = be32_to_cpup(p + 1);
        if (info->ri_position)
                ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
        else
                ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
        if (ret < 0)
                goto out_err;

        ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
        if (ret < 0)
                goto out_err;
        return 0;

out_err:
        svc_rdma_read_info_free(info);
        return ret;
}