xref: /openbmc/linux/net/sunrpc/xprtrdma/svc_rdma_rw.c (revision f13193f50b64e2e0c87706b838d6b9895626a892)
/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include <rdma/rw.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make allocation less likely to fail, and to handle allocation
 * for smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, then cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

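/* Get a cached R/W context from the transport's free list, or
 * allocate a fresh one if the list is empty. The scatterlist table
 * is sized to hold @sges entries, chaining additional entries as
 * needed. Returns NULL if either allocation fails.
 */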
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(sizeof(*ctxt) +
			       SG_CHUNK_SIZE * sizeof(struct scatterlist),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

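/* Release @ctxt's scatterlist table and return the context to the
 * transport's free list for reuse.
 */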
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, true);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
	enum dma_data_direction cc_dir;
};

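/* Initialize an unused chunk context. The context holds a reference
 * on the transport until svc_rdma_cc_release() is called.
 */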
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc,
			     enum dma_data_direction dir)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
	cc->cc_dir = dir;
}

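/* Unmap and release all R/W contexts attached to @cc, then drop
 * the chunk context's transport reference.
 */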
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, cc->cc_dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

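/* Allocate state for writing one chunk. @chunk points to the chunk
 * in the transport header: the word following it carries the segment
 * count, and the segment array starts immediately after that.
 */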
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc, DMA_TO_DEVICE);
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			pr_err("svcrdma: write ctx: %s (%u/0x%x)\n",
			       ib_wc_status_msg(wc->status),
			       wc->status, wc->vendor_err);
	}

	svc_rdma_write_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr, *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
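	/* Chain the WRs of every R/W context together. Only the first
	 * context is given the chunk's cqe, so the chain generates a
	 * single completion on success.
	 */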
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

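	/* Reserve SQ entries for the whole chain; if the SQ is
	 * congested, return them and sleep until space opens up.
	 */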
	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		atomic_inc(&rdma_stat_sq_starve);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
	} while (1);

	pr_err("svcrdma: ib_post_send failed (%d)\n", ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = (info->wi_next_off + xdr->page_base) & ~PAGE_MASK;
	page_no = (info->wi_next_off + xdr->page_base) >> PAGE_SHIFT;
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
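	/* Each pass fills in one scatterlist entry, covering at most
	 * one page of the payload.
	 */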
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	cc->cc_cqe.done = svc_rdma_write_done;
	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
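	/* Each RDMA segment is a 4-word XDR item: a 32-bit handle
	 * (R_key), a 32-bit length, and a 64-bit offset.
	 */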
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
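		/* Room for one SGE per full page of payload, plus
		 * partial pages at the front and back.
		 */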
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	pr_err("svcrdma: failed to map pagelist (%d)\n", ret);
	return -EIO;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is
 * just the page list. A Reply chunk is the head, page list,
 * and tail. This function is shared between the two types
 * of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}

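/* A rough sketch of how a send path might drive the two entry
 * points below (the caller-side names here are illustrative, not
 * taken from this file):
 *
 *	if (wr_lst) {
 *		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
 *		if (ret < 0)
 *			goto err;
 *	}
 *	if (rp_ch) {
 *		ret = svc_rdma_send_reply_chunk(rdma, rp_ch,
 *						wr_lst != NULL, xdr);
 *		if (ret < 0)
 *			goto err;
 *	}
 */
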
/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}