xref: /openbmc/linux/net/sunrpc/xprtrdma/rpc_rdma.c (revision 2ef5a7f1)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max call header size = %u\n",
		__func__, size);
	return size;
}

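/* Worked example (illustrative, assuming RPCRDMA_HDRLEN_MIN is
 * 7 XDR words (28 bytes) and rpcrdma_readchunk_maxsz is 6 XDR
 * words): with maxsegs = 8, the Read list can consume up to
 * 8 * 6 * 4 = 192 bytes, and the minimal Reply chunk adds
 * 4 + 16 + 4 = 24 bytes, for a worst-case Call header of
 * 28 + 192 + 24 = 244 bytes.
 */
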
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

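/* Continuing the worked example above: with maxsegs = 8, the Write
 * list can consume up to 4 + 8 * 16 + 4 = 136 bytes, so the
 * worst-case Reply header is 28 + 136 = 164 bytes.
 */
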
/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

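/* For instance, if the negotiated inline threshold is 4096 bytes and
 * the worst-case Call header is 244 bytes (see the example above),
 * rep_max_inline_send would be 4096 - 244 = 3852 bytes.
 */
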
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

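/* Example of the SGE accounting above, assuming 4KB pages: a 10KB
 * page list starting at a 2KB page offset spans three pages
 * (2KB + 4KB + 4KB), so three SGEs are added to the minimum count.
 * If the total ever exceeds ri_max_send_sges, the payload cannot
 * be sent inline and a Read chunk must be used instead.
 */
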
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}

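/* For example, assuming 4KB pages, a 9000-byte kvec whose base sits
 * 100 bytes into a page is split into three segments of 3996, 4096,
 * and 908 bytes, each of which fits within a single page.
 */
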
/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

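/* An RDMA segment is always four XDR words (16 bytes) on the wire:
 * a 32-bit handle (H), a 32-bit length (L), and a 64-bit offset (OO),
 * hence the "HLOO" shorthand used in the encoding keys below.
 */
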
static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	return encode_item_not_present(xdr);
}

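/* Wire example: a Read chunk registered as two MRs at position P is
 * encoded as 1-P-HLOO, 1-P-HLOO, then an xdr_zero list terminator.
 * Each read segment costs six XDR words (24 bytes) of header space.
 */
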
/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	return encode_item_not_present(xdr);
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych)
		return encode_item_not_present(xdr);

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshaled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto err;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);

	ret = -EIO;
	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		goto err;
	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			goto err;
	return 0;

err:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
						RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_recycle(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

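/* Example: for a reply received with 100 bytes of head content and
 * 300 bytes destined for the page list, the head iovec is simply
 * redirected into the receive buffer (zero bytes copied), while the
 * 300 page-list bytes are memcopied, so this returns 300.
 */
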
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

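/* Example: a Write chunk decoded with a segment count of 2 and
 * segment lengths of 1024 and 512 bytes yields *length = 1536.
 */
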
/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC:       %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC:       %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC:       %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}

/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock(&xprt->transport_lock);
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}
1375