/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

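/* Returns the number of zero bytes needed to pad @len out to the
 * next XDR 4-byte alignment boundary.
 */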
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

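/**
 * svc_rdma_map_xdr - record an xdr_buf's SGE geometry in a req_map
 * @xprt: controlling transport
 * @xdr: xdr_buf containing the RPC Reply
 * @vec: svc_rdma_req_map to fill in, one iovec per prospective SGE
 * @write_chunk_present: true if the Reply's payload is being sent
 *	in a Write chunk
 *
 * sge[0] is left free for the transport header. When a Write chunk
 * is present, the XDR pad at the front of the tail has already been
 * sent via RDMA Write, so the pad is skipped here.
 *
 * Returns zero on success, or -EIO if the xdr_buf's length fields
 * are inconsistent.
 */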
int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
		     struct xdr_buf *xdr,
		     struct svc_rdma_req_map *vec,
		     bool write_chunk_present)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	if (xdr->len !=
	    (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) {
		pr_err("svcrdma: %s: XDR buffer length error\n", __func__);
		return -EIO;
	}

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		unsigned char *base = xdr->tail[0].iov_base;
		size_t len = xdr->tail[0].iov_len;
		u32 xdr_pad = xdr_padsize(xdr->page_len);

		if (write_chunk_present && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		if (len) {
			vec->sge[sge_no].iov_base = base;
			vec->sge[sge_no].iov_len = len;
			sge_no++;
		}
	}

	dprintk("svcrdma: %s: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		__func__, sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

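/* DMA map the portion of the xdr_buf containing byte offset @xdr_off.
 * The offset may fall within the head, the page list, or the tail;
 * at most one page, starting at that offset, is mapped.
 *
 * Returns the DMA address, which the caller must check with
 * ib_dma_mapping_error().
 */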
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
			      struct xdr_buf *xdr,
			      u32 xdr_off, size_t len, int dir)
{
	struct page *page;
	dma_addr_t dma_addr;
	if (xdr_off < xdr->head[0].iov_len) {
		/* This offset is in the head */
		xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
		page = virt_to_page(xdr->head[0].iov_base);
	} else {
		xdr_off -= xdr->head[0].iov_len;
		if (xdr_off < xdr->page_len) {
			/* This offset is in the page list */
			xdr_off += xdr->page_base;
			page = xdr->pages[xdr_off >> PAGE_SHIFT];
			xdr_off &= ~PAGE_MASK;
		} else {
			/* This offset is in the tail */
			xdr_off -= xdr->page_len;
			xdr_off += (unsigned long)
				xdr->tail[0].iov_base & ~PAGE_MASK;
			page = virt_to_page(xdr->tail[0].iov_base);
		}
	}
	dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
				   min_t(size_t, PAGE_SIZE, len), dir);
	return dma_addr;
}

/* Parse the RPC Call's transport header, locating its Write list and
 * Reply chunk. The Read list is walked over but not returned. On
 * return, *write and *reply point into the header, or are NULL when
 * the corresponding chunk list is empty.
 */
static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp,
				      struct rpcrdma_write_array **write,
				      struct rpcrdma_write_array **reply)
{
	__be32 *p;

	p = (__be32 *)&rmsgp->rm_body.rm_chunks[0];

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = (struct rpcrdma_write_array *)p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = (struct rpcrdma_write_array *)p;
	else
		*reply = NULL;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one rkey to invalidate.
 *
 * Find a candidate rkey to invalidate when sending a reply.  Picks
 * the first rkey it finds in the chunk lists.
 *
 * Returns zero if the RPC's chunk lists are empty.
 */
static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
				 struct rpcrdma_write_array *wr_ary,
				 struct rpcrdma_write_array *rp_ary)
{
	struct rpcrdma_read_chunk *rd_ary;
	struct rpcrdma_segment *arg_ch;

	rd_ary = (struct rpcrdma_read_chunk *)&rdma_argp->rm_body.rm_chunks[0];
	if (rd_ary->rc_discrim != xdr_zero)
		return be32_to_cpu(rd_ary->rc_target.rs_handle);

	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
		arg_ch = &wr_ary->wc_array[0].wc_target;
		return be32_to_cpu(arg_ch->rs_handle);
	}

	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
		arg_ch = &rp_ary->wc_array[0].wc_target;
		return be32_to_cpu(arg_ch->rs_handle);
	}

	return 0;
}

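/* DMA map one page of a Send buffer and record the mapping in @ctxt's
 * SGE array at index @sge_no.
 *
 * Returns zero on success, or -EIO if the mapping failed.
 */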
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_op_ctxt *ctxt,
				 unsigned int sge_no,
				 struct page *page,
				 unsigned int offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -EIO;

	ctxt->sge[sge_no].addr = dma_addr;
	ctxt->sge[sge_no].length = len;
	ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
	svc_rdma_count_mappings(rdma, ctxt);
	return 0;
}

/**
 * svc_rdma_map_reply_hdr - DMA map the transport header buffer
 * @rdma: controlling transport
 * @ctxt: op_ctxt for the Send WR
 * @rdma_resp: buffer containing transport header
 * @len: length of transport header
 *
 * Returns:
 *	%0 if the header is DMA mapped,
 *	%-EIO if DMA mapping failed.
 */
int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma,
			   struct svc_rdma_op_ctxt *ctxt,
			   __be32 *rdma_resp,
			   unsigned int len)
{
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->pages[0] = virt_to_page(rdma_resp);
	ctxt->count = 1;
	return svc_rdma_dma_map_page(rdma, ctxt, 0, ctxt->pages[0], 0, len);
}

/* Post one RDMA Write WR to send a portion of the RPC Reply's payload
 * to the requester.
 *
 * Assumption: the specified write_len fits in sc_max_sge * PAGE_SIZE.
 * If the WR fills up before write_len bytes are consumed, only the
 * bytes actually posted are reported, and the caller issues another
 * Write for the remainder.
 *
 * Returns the number of bytes posted, or -EIO on error.
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_rdma_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	if (vec->count > RPCSVC_MAXPAGES) {
		pr_err("svcrdma: Too many pages (%lu)\n", vec->count);
		return -EIO;
	}

	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
				  bc, vec->sge[xdr_sge_no].iov_len - sge_off);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].addr =
			dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(xprt, ctxt);
		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
		ctxt->count++;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		if (xdr_sge_no > vec->count) {
			pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no);
			goto err;
		}
		bc -= sge_bytes;
		if (sge_no == xprt->sc_max_sge)
			break;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof(write_wr));
	ctxt->cqe.done = svc_rdma_wc_write;
	write_wr.wr.wr_cqe = &ctxt->cqe;
	write_wr.wr.sg_list = &sge[0];
	write_wr.wr.num_sge = sge_no;
	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.send_flags = IB_SEND_SIGNALED;
	write_wr.rkey = rmr;
	write_wr.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr.wr))
		goto err;
	return write_len - bc;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return -EIO;
}

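/* Send the RPC Reply's page-list payload to the requester via RDMA
 * Write, one Write chunk segment at a time, and encode the response's
 * Write list to record how many bytes landed in each segment.
 *
 * Returns the number of payload bytes consumed by the Write chunk,
 * or -EIO if a Write could not be posted.
 */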
noinline
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *wr_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len;
	int write_len;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	int nchunks;
	struct rpcrdma_write_array *res_ary;
	int ret;

	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	/* Write chunks start at the pagelist */
	nchunks = be32_to_cpu(wr_ary->wc_nchunks);
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &wr_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));

		/* Prepare the response chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						arg_ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(arg_ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len;

out_err:
	pr_err("svcrdma: failed to send write chunks, rc=%d\n", ret);
	return -EIO;
}

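/* Send the complete RPC Reply message, starting at XDR offset zero,
 * to the requester via RDMA Write into its Reply chunk, and encode
 * the response's Reply chunk to record the bytes actually written.
 *
 * Returns the total number of bytes written, or -EIO if a Write
 * could not be posted.
 */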
noinline
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_write_array *rp_ary,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	int nchunks;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *res_ary;
	int ret;

	/* XXX: need to fix when a Reply chunk occurs together with a
	 * Read list and/or a Write list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	/* xdr offset starts at RPC message */
	nchunks = be32_to_cpu(rp_ary->wc_nchunks);
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < nchunks;
	     chunk_no++) {
		u64 rs_offset;
		ch = &rp_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));

		/* Prepare the reply chunk given the length actually
		 * written */
		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, ch->rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			ret = send_write(xprt, rqstp,
					 be32_to_cpu(ch->rs_handle),
					 rs_offset + chunk_off,
					 xdr_off,
					 write_len,
					 vec);
			if (ret <= 0)
				goto out_err;
			chunk_off += ret;
			xdr_off += ret;
			xfer_len -= ret;
			write_len -= ret;
		}
	}
	/* Update the req with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;

out_err:
	pr_err("svcrdma: failed to send reply chunks, rc=%d\n", ret);
	return -EIO;
}

/**
 * svc_rdma_post_send_wr - Set up and post one Send Work Request
 * @rdma: controlling transport
 * @ctxt: op_ctxt for transmitting the Send WR
 * @num_sge: number of SGEs to send
 * @inv_rkey: R_key argument to Send With Invalidate, or zero
 *
 * Returns:
 *	%0 if the Send* was posted successfully,
 *	%-ENOTCONN if the connection was lost or dropped,
 *	%-EINVAL if there was a problem with the Send we built,
 *	%-ENOMEM if ib_post_send failed.
 */
int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
			  struct svc_rdma_op_ctxt *ctxt, int num_sge,
			  u32 inv_rkey)
{
	struct ib_send_wr *send_wr = &ctxt->send_wr;

	dprintk("svcrdma: posting Send WR with %u sge(s)\n", num_sge);

	send_wr->next = NULL;
	ctxt->cqe.done = svc_rdma_wc_send;
	send_wr->wr_cqe = &ctxt->cqe;
	send_wr->sg_list = ctxt->sge;
	send_wr->num_sge = num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
	if (inv_rkey) {
		send_wr->opcode = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	return svc_rdma_send(rdma, send_wr);
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], already encoded in the buffer at 'rdma_resp', and the
 * 'byte_count' argument indicates how much of the XDR to include in
 * this RDMA_SEND. NB: The offset of the payload to send is zero in
 * the XDR.
 */
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_req_map *vec,
		      int byte_count,
		      u32 inv_rkey)
{
	struct svc_rdma_op_ctxt *ctxt;
	u32 xdr_off;
	int sge_no;
	int sge_bytes;
	int page_no;
	int pages;
	int ret = -EIO;

	/* Prepare the context */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->pages[0] = page;
	ctxt->count = 1;

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->sge[0].length =
	    svc_rdma_xdr_get_reply_hdr_len((__be32 *)rdma_resp);
	ctxt->sge[0].addr =
	    ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
			    ctxt->sge[0].length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	svc_rdma_count_mappings(rdma, ctxt);

	/* Map the payload indicated by 'byte_count' */
	xdr_off = 0;
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		ctxt->sge[sge_no].addr =
			dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
				    sge_bytes, DMA_TO_DEVICE);
		xdr_off += sge_bytes;
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[sge_no].addr))
			goto err;
		svc_rdma_count_mappings(rdma, ctxt);
		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
		ctxt->sge[sge_no].length = sge_bytes;
	}
	if (byte_count != 0) {
		pr_err("svcrdma: Could not map %d bytes\n", byte_count);
		goto err;
	}

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	pages = rqstp->rq_next_page - rqstp->rq_respages;
	for (page_no = 0; page_no < pages; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
	}
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	if (sge_no > rdma->sc_max_sge) {
		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
		goto err;
	}

	ret = svc_rdma_post_send_wr(rdma, ctxt, sge_no, inv_rkey);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return ret;
}

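/* The ->xpo_prep_reply_hdr method. Nothing to do here: the
 * RPC-over-RDMA transport header is constructed in its own buffer
 * by svc_rdma_sendto(), so no space is reserved in rq_res's head.
 */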
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

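/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request; reply XDR already encoded in rq_res
 *
 * Sends the payload indicated by the Call's Write list and Reply
 * chunk via RDMA Write, then posts an RDMA Send carrying the
 * transport header and any remaining inline content.
 *
 * Returns zero if the reply was posted successfully, or -ENOTCONN
 * if sending failed, in which case the transport is marked for
 * close.
 */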
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *wr_ary, *rp_ary;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_req_map *vec;
	u32 inv_rkey;
	__be32 *p;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. The receive logic always
	 * places this at the start of page 0.
	 */
	rdma_argp = page_address(rqstp->rq_pages[0]);
	svc_rdma_get_write_arrays(rdma_argp, &wr_ary, &rp_ary);

	inv_rkey = 0;
	if (rdma->sc_snd_w_inv)
		inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);

	/* Build a req vec for the XDR */
	vec = svc_rdma_get_req_map(rdma);
	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	res_page = alloc_page(GFP_KERNEL);
	if (!res_page)
		goto err0;
	rdma_resp = page_address(res_page);

	p = &rdma_resp->rm_xid;
	*p++ = rdma_argp->rm_xid;
	*p++ = rdma_argp->rm_vers;
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ary ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p   = xdr_zero;

	/* Send any write-chunk data and build resp write-list */
	if (wr_ary) {
		ret = send_write_chunks(rdma, wr_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret + xdr_padsize(ret);
	}

	/* Send any reply-list data and update resp reply-list */
	if (rp_ary) {
		ret = send_reply_chunks(rdma, rp_ary, rdma_resp, rqstp, vec);
		if (ret < 0)
			goto err1;
		inline_bytes -= ret;
	}

	/* Post a fresh Receive buffer _before_ sending the reply */
	ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
	if (ret)
		goto err1;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
			 inline_bytes, inv_rkey);
	if (ret < 0)
		goto err0;

	svc_rdma_put_req_map(rdma, vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(rdma, vec);
	pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
	       ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	return -ENOTCONN;
}

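/* Send an RPC-over-RDMA error reply (ERR_VERS or ERR_CHUNK) for a
 * Call whose transport header could not be processed. A fresh
 * Receive buffer is posted first, replacing the one consumed by the
 * failing Call.
 */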
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 int status)
{
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	enum rpcrdma_errcode err;
	__be32 *va;
	int length;
	int ret;

	ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
	if (ret)
		return;

	p = alloc_page(GFP_KERNEL);
	if (!p)
		return;
	va = page_address(p);

	/* XDR encode an error reply */
	err = ERR_CHUNK;
	if (status == -EPROTONOSUPPORT)
		err = ERR_VERS;
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Map the page holding the encoded error reply built above at
	 * 'va'; there is no RPC message payload
	 */
	ctxt = svc_rdma_get_context(xprt);
	ret = svc_rdma_map_reply_hdr(xprt, ctxt, va, length);
	if (ret) {
		dprintk("svcrdma: Error %d mapping send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
		return;
	}

	ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}