// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first must begin on a
 * page boundary, and xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make allocation less likely to fail, and to handle the allocation
 * for smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

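/* Return the first R/W context on @list, or NULL if @list is empty.
 */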
static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

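/* Acquire an R/W context from the transport's free list, or allocate
 * a fresh one. @sges is the number of scatterlist entries the caller
 * needs; sg_alloc_table_chained() links in additional chunks when
 * @sges exceeds the inline rw_first_sgl[] array. For example,
 * svc_rdma_build_read_segment() below sizes @sges as
 * PAGE_ALIGN(ri_pageoff + len) >> PAGE_SHIFT.
 */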
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out_noctx;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

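/* Release the scatterlist table and return @ctxt to the transport's
 * free list for reuse.
 */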
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_rw_ctx_init - Prepare an R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns the number of WQEs that will be needed on the work queue
 * on success, or a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (unlikely(ret < 0)) {
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct rpc_rdma_cid	cc_cid;
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

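/* Tag this chunk context with a completion ID: the Send CQ's resource
 * ID plus a per-transport sequence number. Trace points use the ID to
 * correlate posted work with its Work Completions.
 */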
static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
				 struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

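/* Initialize a chunk context before use. The svc_xprt reference taken
 * here keeps the transport alive until svc_rdma_cc_release() is
 * called.
 */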
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

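/* Release the rdma_rw contexts attached to @cc, DMA-unmapping their
 * scatterlists, then drop the transport reference taken by
 * svc_rdma_cc_init().
 */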
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

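/* Allocate state for writing one Write or Reply chunk. @chunk points
 * into the transport header: the XDR word after @chunk carries the
 * chunk's segment count, and the segment array follows it.
 */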
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc, &cc->cc_cid);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
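/* ri_readctxt: Receive context for the incoming RPC;
 * ri_position: Position field shared by the chunk's segments;
 * ri_pageno, ri_pageoff: cursor into the Read sink pages;
 * ri_chunklen: total chunk bytes pulled so far.
 */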
struct svc_rdma_read_info {
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

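/* Allocate state for pulling one Read chunk. Completion of the
 * chunk's Read WRs is handled by svc_rdma_wc_read_done().
 */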
static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc, &cc->cc_cid);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
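/* Send Queue accounting: cc_sqecount SQEs are reserved by subtracting
 * them from sc_sq_avail before posting. If the subtraction would leave
 * the counter at or below zero, the reservation is backed out and the
 * caller sleeps until completions return enough SQEs (they credit
 * sc_sq_avail and wake sc_send_wait).
 */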
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 handle, length;
		u64 offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		xdr_decode_rdma_segment(seg, &handle, &length, &offset);
		offset += info->wi_seg_off;

		write_len = min(remaining, length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			return -ENOMEM;

		constructor(info, write_len, ctxt);
		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
					   DMA_TO_DEVICE);
		if (ret < 0)
			return -EIO;

		trace_svcrdma_send_wseg(handle, write_len, offset);

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
				     info->wi_nsegs);
	return -E2BIG;
}


/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr,
				      unsigned int offset,
				      unsigned long length)
{
	info->wi_xdr = xdr;
	info->wi_next_off = offset - xdr->head[0].iov_len;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     length);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr,
			      unsigned int offset, unsigned long length)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!length)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_write_chunk(xdr->page_len);
	return length;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_recv_ctxt *rctxt,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!rctxt->rc_write_list && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr,
						 xdr->head[0].iov_len,
						 xdr->page_len);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_reply_chunk(consumed);
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

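/* Build RDMA Read WRs to pull one Read segment (@rkey, @len, @offset)
 * from the client into pages borrowed from @rqstp. Each page under
 * I/O is also recorded in the Receive context so it survives until
 * the Read completes.
 */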
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
				   DMA_FROM_DEVICE);
	if (ret < 0)
		return -EIO;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_overrun:
	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
	return -EINVAL;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
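/* On the wire, each Read list entry is laid out as XDR words
 * (per RFC 8166); the offset is 64 bits:
 *
 *	1		- list item is present
 *	Position	- byte offset of the chunk in the RPC message
 *	Handle		- R_key for the client's memory region
 *	Length		- segment length in bytes
 *	Offset		- offset of the segment in that region
 *
 * The loop below consumes entries until the list terminator
 * (xdr_zero) or an entry whose Position differs from @info's.
 */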
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 handle, length;
		u64 offset;

		p = xdr_decode_rdma_segment(p, &handle, &length, &offset);
		ret = svc_rdma_build_read_segment(info, rqstp, handle, length,
						  offset);
		if (ret < 0)
			break;

		trace_svcrdma_send_rseg(handle, length, offset);
		info->ri_chunklen += length;
	}

	return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

	head->rc_hdr_count = 0;

	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's Position. XDR roundup of
	 * the chunk is not included in either the pagelist or
	 * the tail.
	 */
	head->rc_arg.tail[0].iov_base =
		head->rc_arg.head[0].iov_base + info->ri_position;
	head->rc_arg.tail[0].iov_len =
		head->rc_arg.head[0].iov_len - info->ri_position;
	head->rc_arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->rc_arg.page_len = info->ri_chunklen;
	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_pzr(info->ri_chunklen);

	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

	head->rc_hdr_count = 1;
	head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
	head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
					     info->ri_chunklen);

	head->rc_arg.page_len = info->ri_chunklen -
				head->rc_arg.head[0].iov_len;

out:
	return ret;
}

/* Pages under I/O have been copied to head->rc_pages. Ensure they
 * are not released by svc_xprt_release() until the I/O is complete.
 *
 * This has to be done after all Read WRs are constructed to properly
 * handle a page that is part of I/O on behalf of two different RDMA
 * segments.
 *
 * Do this only if I/O has been posted. Otherwise, we do indeed want
 * svc_xprt_release() to clean things up properly.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   const unsigned int start,
				   const unsigned int num_pages)
{
	unsigned int i;

	for (i = start; i < num_pages + start; i++)
		rqstp->rq_pages[i] = NULL;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_recv_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	int ret;

	/* The request (with page list) is constructed in
	 * head->rc_arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
	if (ret < 0)
		goto out_err;
	svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
	return 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}