// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};
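
/* rw_first_sgl is the inline head of a chained scatterlist:
 * svc_rdma_get_rw_ctxt() passes it to sg_alloc_table_chained(),
 * so requests that fit in SG_CHUNK_SIZE entries should need no
 * extra allocation for the SGL.
 */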

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

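/* Assumed layout of a Write chunk in the transport header (per
 * RFC 8166): one XDR word for the list discriminator, one XDR
 * word for the segment count, then four XDR words per segment
 * (handle, length, 64-bit offset). The pre-increments below step
 * over the discriminator, record the segment count, and leave
 * wi_segs pointing at the first segment.
 */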
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_op_ctxt		*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
		svc_rdma_put_context(info->ri_readctxt, 1);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->list,
			      &rdma->sc_read_complete_q);
		spin_unlock(&rdma->sc_rq_dto_lock);

		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr, *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

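	/* Reserve cc_sqecount Send Queue entries up front. If that
	 * would overdraw sc_sq_avail, return the credits, count a
	 * starve event, and sleep until completions have replenished
	 * the Send Queue.
	 */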
	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		atomic_inc(&rdma_stat_sq_starve);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
	} while (1);

	pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
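	/* Illustrative example: wi_next_off 5000 plus page_base 100
	 * gives absolute offset 5100, which maps to page_no 1 and
	 * page_off 1004 (assuming 4KB pages).
	 */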
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
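		/* A payload of write_len bytes can begin and end in
		 * the middle of a page, so it can touch at most
		 * (write_len >> PAGE_SHIFT) + 2 pages. Reserve that
		 * many SGL entries.
		 */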
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

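	/* Round the segment up to whole pages to compute how many
	 * scatterlist entries the payload can touch, given that it
	 * starts at ri_pageoff within the first page.
	 */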
	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		goto out_noctx;
	ctxt->rw_nents = sge_no;

	dprintk("svcrdma: reading segment %u@0x%016llx:0x%08x (%u sges)\n",
		len, offset, rkey, sge_no);

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
			       cc->cc_rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		goto out_initerr;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;

out_initerr:
	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
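/* Assumed layout of each Read list entry (per RFC 8166): one XDR
 * word for the list discriminator (non-zero means an entry
 * follows), one XDR word for the Position (the XDR stream offset
 * where the chunk's data belongs), then one segment: handle,
 * length, and a 64-bit offset. The loop below stops at the list
 * terminator, or at the first entry whose Position does not match
 * ri_position.
 */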
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		info->ri_chunklen += rs_length;
	}

	return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->arg.pages.
 *
 * Currently NFSD does not look at the head->arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Read chunk at position %u\n",
		info->ri_position);

	info->ri_pageno = head->hdr_count;
	info->ri_pageoff = 0;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's position. XDR roundup of
	 * the chunk is not included in either the pagelist or
	 * in the tail.
	 */
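	/* For example (illustrative): with ri_position 96 and a
	 * 120-byte head, the head keeps XDR bytes 0-95 and the tail
	 * maps bytes 96-119; the chunk payload is inserted between
	 * them via the pagelist.
	 */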
	head->arg.tail[0].iov_base =
		head->arg.head[0].iov_base + info->ri_position;
	head->arg.tail[0].iov_len =
		head->arg.head[0].iov_len - info->ri_position;
	head->arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
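	/* For example (illustrative): a 5-byte chunk length becomes
	 * XDR_QUADLEN(5) << 2 == 8 bytes; an 8-byte length is
	 * unchanged.
	 */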
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->arg.page_len = info->ri_chunklen;
	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Position Zero Read chunk\n");

	info->ri_pageno = head->hdr_count - 1;
	info->ri_pageoff = offset_in_page(head->byte_len);

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

	if (head->arg.buflen <= head->sge[0].length) {
		/* Transport header and RPC message fit entirely
		 * in page where head iovec resides.
		 */
		head->arg.head[0].iov_len = info->ri_chunklen;
	} else {
		/* Transport header and part of RPC message reside
		 * in the head iovec's page.
		 */
		head->arg.head[0].iov_len =
				head->sge[0].length - head->byte_len;
		head->arg.page_len =
				info->ri_chunklen - head->arg.head[0].iov_len;
	}

out:
	return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_op_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	struct page **page;
	int ret;

	/* The request (with page list) is constructed in
	 * head->arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->hdr_count = head->count;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = head->pages;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);

	/* Mark the start of the pages that can be used for the reply */
	if (info->ri_pageoff > 0)
		info->ri_pageno++;
	rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (ret < 0)
		goto out;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);

out:
	/* Read sink pages have been moved from rqstp->rq_pages to
	 * head->arg.pages. Force svc_recv to refill those slots
	 * in rq_pages.
	 */
	for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
		*page = NULL;

	if (ret < 0)
		svc_rdma_read_info_free(info);
	return ret;
}