/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};
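
/* Sizing note (an estimate, not exact): with SG_CHUNK_SIZE at its
 * common value of 128 and a 32-byte struct scatterlist on a typical
 * 64-bit build, the inline SGL alone accounts for
 *
 *	SG_CHUNK_SIZE * sizeof(struct scatterlist) = 128 * 32 = 4096
 *
 * bytes, before the other fields of svc_rdma_rw_ctxt are counted.
 * An allocation that large can fail under memory pressure, which is
 * why svc_rdma_get_rw_ctxt() below recycles contexts from a free
 * list before falling back to kmalloc().
 */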

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};
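
/* cc_sqecount accumulates the number of Send Queue entries this chain
 * will consume: each successful rdma_rw_ctx_init() call below returns
 * the number of WRs it prepared, and the builders add that to
 * cc_sqecount so that svc_rdma_post_chunk_ctxt() can reserve exactly
 * that many slots in sc_sq_avail before posting.
 */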

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};
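
/* The Write chunk that wi_segs points into is, on the wire, a counted
 * array of segments. Each segment is four XDR words: a handle, a
 * length, and a two-word (64-bit) offset, as decoded by
 * svc_rdma_write_info_alloc() and svc_rdma_build_writes() below:
 *
 *	<count>
 *	<handle> <length> <offset-hi> <offset-lo>	(segment 0)
 *	<handle> <length> <offset-hi> <offset-lo>	(segment 1)
 *	...
 */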

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_op_ctxt		*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
		svc_rdma_put_context(info->ri_readctxt, 1);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->list,
			      &rdma->sc_read_complete_q);
		spin_unlock(&rdma->sc_rq_dto_lock);

		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
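/* Send Queue accounting, in brief: atomic_sub_return() optimistically
 * reserves cc_sqecount slots from sc_sq_avail. If the result is not
 * positive, the reservation is returned and the caller sleeps until a
 * completion frees enough slots. For example, with sc_sq_avail at 3
 * and cc_sqecount at 5, the subtraction yields -2, so the WR chain
 * waits rather than overflowing the Send Queue.
 */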
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr, *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		atomic_inc(&rdma_stat_sq_starve);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
	} while (1);

	pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
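	/* Worked example, assuming 4KB pages: with page_base 512 and
	 * wi_next_off 7000, the absolute offset is 7512, which lands
	 * in pages[1] (7512 >> 12 == 1) at byte 3416 (7512 & 4095).
	 */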
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
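		/* The SGE estimate below allows one extra entry at
		 * each end for a partial first and last page: for
		 * example, a 9000-byte payload that starts mid-page
		 * can span (9000 >> 12) + 2 = 4 pages on a 4KB-page
		 * system.
		 */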
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

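/* Build and DMA-map an SGL that pulls one Read segment into pages of
 * the svc_rqst. The SGE count is the number of pages the segment can
 * touch: for example, an 8192-byte segment arriving at page offset
 * 100 needs PAGE_ALIGN(100 + 8192) >> PAGE_SHIFT = 3 entries on a
 * 4KB-page system.
 */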
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		goto out_noctx;
	ctxt->rw_nents = sge_no;

	dprintk("svcrdma: reading segment %u@0x%016llx:0x%08x (%u sges)\n",
		len, offset, rkey, sge_no);

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
			       cc->cc_rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		goto out_initerr;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;

out_initerr:
	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
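/* On the wire, each entry of the Read list that the loop below walks
 * is a discriminator word followed by a Read segment:
 *
 *	<not-zero>	- another list entry follows
 *	<position>	- offset into the RPC message; must match
 *			  ri_position for this chunk
 *	<handle> <length> <offset-hi> <offset-lo>
 *
 * The walk ends at the first xdr_zero discriminator, or at the first
 * segment whose Position differs (the start of another chunk).
 */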
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		info->ri_chunklen += rs_length;
	}

	return ret;
}

/* If there is inline content following the Read chunk, append it to
 * the page list immediately following the data payload. This has to
 * be done after the reader function has determined how many pages
 * were consumed for RDMA Read.
 *
 * On entry, ri_pageno and ri_pageoff point directly to the end of the
 * page list. On exit, both have been updated to the new "next byte".
 *
 * Assumptions:
 *	- Inline content fits entirely in rq_pages[0]
 *	- Trailing content is only a handful of bytes
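 *
 * For example (hypothetical numbers): if the chunk Position is 96 and
 * the received head kvec is 120 bytes long, then the 24 bytes of
 * inline content that follow the Position are appended to the end of
 * the page list.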
 */
static int svc_rdma_copy_tail(struct svc_rqst *rqstp,
			      struct svc_rdma_read_info *info)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	unsigned int tail_length, remaining;
	u8 *srcp, *destp;

	/* Assert that all inline content fits in page 0. This is an
	 * implementation limit, not a protocol limit.
	 */
	if (head->arg.head[0].iov_len > PAGE_SIZE) {
		pr_warn_once("svcrdma: too much trailing inline content\n");
		return -EINVAL;
	}

	srcp = head->arg.head[0].iov_base;
	srcp += info->ri_position;
	tail_length = head->arg.head[0].iov_len - info->ri_position;
	remaining = tail_length;

	/* If there is room on the last page in the page list, try to
	 * fit the trailing content there.
	 */
	if (info->ri_pageoff > 0) {
		unsigned int len;

		len = min_t(unsigned int, remaining,
			    PAGE_SIZE - info->ri_pageoff);
		destp = page_address(rqstp->rq_pages[info->ri_pageno]);
		destp += info->ri_pageoff;

		memcpy(destp, srcp, len);
		srcp += len;
		destp += len;
		info->ri_pageoff += len;
		remaining -= len;

		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
	}

	/* Otherwise, a fresh page is needed. */
	if (remaining) {
		head->arg.pages[info->ri_pageno] =
				rqstp->rq_pages[info->ri_pageno];
		head->count++;

		destp = page_address(rqstp->rq_pages[info->ri_pageno]);
		memcpy(destp, srcp, remaining);
		info->ri_pageoff += remaining;
	}

	head->arg.page_len += tail_length;
	head->arg.len += tail_length;
	head->arg.buflen += tail_length;
	return 0;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->arg.pages.
 *
 * Currently NFSD does not look at the head->arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Read chunk at position %u\n",
		info->ri_position);

	info->ri_pageno = head->hdr_count;
	info->ri_pageoff = 0;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	/* Read chunk may need XDR round-up (see RFC 5666, s. 3.7).
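	 * For example, a 10-byte chunk is rounded up to 12 bytes
	 * (padlen = 4 - (10 & 3) = 2).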
	 */
	if (info->ri_chunklen & 3) {
		u32 padlen = 4 - (info->ri_chunklen & 3);

		info->ri_chunklen += padlen;

		/* NB: data payload always starts on XDR alignment,
		 * thus the pad can never contain a page boundary.
		 */
		info->ri_pageoff += padlen;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
	}

	head->arg.page_len = info->ri_chunklen;
	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

	if (info->ri_position < head->arg.head[0].iov_len) {
		ret = svc_rdma_copy_tail(rqstp, info);
		if (ret < 0)
			goto out;
	}
	head->arg.head[0].iov_len = info->ri_position;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_op_ctxt *head = info->ri_readctxt;
	int ret;

	dprintk("svcrdma: Reading Position Zero Read chunk\n");

	info->ri_pageno = head->hdr_count - 1;
	info->ri_pageoff = offset_in_page(head->byte_len);

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	head->arg.len += info->ri_chunklen;
	head->arg.buflen += info->ri_chunklen;

	if (head->arg.buflen <= head->sge[0].length) {
		/* Transport header and RPC message fit entirely
		 * in page where head iovec resides.
		 */
		head->arg.head[0].iov_len = info->ri_chunklen;
	} else {
		/* Transport header and part of RPC message reside
		 * in the head iovec's page.
		 */
		head->arg.head[0].iov_len =
				head->sge[0].length - head->byte_len;
		head->arg.page_len =
				info->ri_chunklen - head->arg.head[0].iov_len;
	}

out:
	return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_op_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	struct page **page;
	int ret;

	/* The request (with page list) is constructed in
	 * head->arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->hdr_count = head->count;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = head->pages;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);

	/* Mark the start of the pages that can be used for the reply */
	if (info->ri_pageoff > 0)
		info->ri_pageno++;
	rqstp->rq_respages = &rqstp->rq_pages[info->ri_pageno];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (ret < 0)
		goto out;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);

out:
	/* Read sink pages have been moved from rqstp->rq_pages to
	 * head->arg.pages. Force svc_recv to refill those slots
	 * in rq_pages.
	 */
	for (page = rqstp->rq_pages; page < rqstp->rq_respages; page++)
		*page = NULL;

	if (ret < 0)
		svc_rdma_read_info_free(info);
	return ret;
}