// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max call header size = %u\n",
		__func__, size);
	return size;
}
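
/* Worked example (illustrative): with maxsegs = 8, taking
 * RPCRDMA_HDRLEN_MIN as the seven fixed XDR words reserved by
 * rpcrdma_marshal_req() below (28 bytes), a Read chunk as six XDR
 * words, and a bare segment as four XDR words (the sizes that
 * encode_read_segment() and encode_rdma_segment() reserve):
 *
 *	28 + (8 * 6 * 4) + (4 + 4 * 4 + 4) = 244 bytes
 */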

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);		/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC:       %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}
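
/* Worked example (illustrative; assumes the common 4096-byte inline
 * threshold for both inline_wsize and inline_rsize, and the 244-byte
 * Call header computed above for maxsegs = 8):
 *
 *	ri_max_inline_write = 4096 - 244 = 3852 bytes
 *	ri_max_inline_read  = 4096 - (28 + 4 + 8 * 16 + 4) = 3932 bytes
 *
 * RPC messages larger than these limits must move their payloads via
 * explicitly registered chunks.
 */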

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
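
/* Example of the SGE accounting above (illustrative; assumes 4KB
 * pages): a Send payload whose page list carries 8192 bytes starting
 * at a page offset of 100 spans three pages, so the loop counts three
 * SGEs on top of RPCRDMA_MIN_SEND_SGES, which effectively reserves
 * slots for the transport header and the head and tail iovecs. If
 * that total exceeds the device's ri_max_send_sges, the payload is
 * moved via a Read chunk instead.
 */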

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds the
 * inline threshold, the client must provide a Write list or a Reply
 * chunk for this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
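
/* Example (illustrative; assumes 4KB pages): a 6000-byte kvec whose
 * iov_base begins 100 bytes into a page is split into two segments:
 *
 *	seg[0]: mr_offset = base,        mr_len = 3996
 *	seg[1]: mr_offset = base + 3996, mr_len = 2004
 *
 * and *n is bumped by two.
 */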

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}
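
/* Example (illustrative): when called with a nonzero @pos (so the
 * head kvec is skipped) for an xdr_buf carrying page_len = 8192 at
 * a zero page_base offset plus a 4-byte tail, the page loop above
 * produces two segments, and the tail kvec adds a third unless the
 * device advertises implicit roundup for this chunk type; the
 * function then returns 3 (or 2).
 */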

static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
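
/* On-the-wire layout produced by encode_read_segment() (six XDR
 * words, 24 bytes, all fields big-endian):
 *
 *	+-----------+----------+--------+--------+------------------+
 *	| 1 present | position | handle | length | offset (64 bits) |
 *	+-----------+----------+--------+--------+------------------+
 *
 * encode_rdma_segment() emits only the trailing HLOO portion
 * (handle, length, offset): four XDR words, 16 bytes.
 */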

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}
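
/* Layout of the sendctx SGE array assumed by the helper above and
 * constructed by the helpers below (see rpcrdma_prepare_hdr_sge()
 * and rpcrdma_prepare_msg_sges()):
 *
 *	sc_sges[0]   - RPC-over-RDMA transport header (rl_rdmabuf)
 *	sc_sges[1]   - head iovec / inline Send buffer (rl_sendbuf)
 *	sc_sges[2..] - page list and tail, DMA-mapped per Send and
 *		       unmapped again by rpcrdma_unmap_sendctx()
 */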

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovecs, and each entry
 * in the page list, get their own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = sc->sc_sges;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sendctx(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshaled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -EAGAIN;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in @rqst, this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
						RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_recycle(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	switch (ret) {
	case -EAGAIN:
		xprt_wait_for_buffer_space(rqst->rq_xprt);
		break;
	case -ENOBUFS:
		break;
	default:
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}
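
/* Example transport header for a small inline Call (illustrative):
 * rpcrdma_marshal_req() emits seven XDR words when both the arguments
 * and results fit inline:
 *
 *	[ xid ][ 1 (version) ][ credits ][ RDMA_MSG ][ 0 ][ 0 ][ 0 ]
 *
 * The three trailing zeroes are the empty Read list, Write list, and
 * Reply chunk discriminators; the RPC call message follows
 * immediately in the same Send.
 */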

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
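
/* Worked example (illustrative): suppose @copy_len is 5000 bytes and
 * rq_rcv_buf was set up with head.iov_len = 128 and page_len = 4096.
 * The head is redirected into the receive buffer (no copy, 128 bytes
 * consumed), 4096 bytes are memcopied into the page list, and the
 * remaining 776 bytes are exposed by redirecting the tail iovec.
 * The function returns 4096, the number of bytes actually copied.
 */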

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
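
/* A backchannel Call is recognized by peeking at the five XDR words
 * that follow the fixed header (illustrative layout):
 *
 *	[ 0 ][ 0 ][ 0 ][ xid ][ RPC_CALL ]
 *
 * i.e. three empty chunk lists followed by an RPC header whose
 * message direction is CALL rather than REPLY.
 */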

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}
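
/* Example decode (illustrative): a Write chunk encoded as
 *
 *	[ 1 (segcount) ][ handle ][ 1024 (length) ][ offset (64 bits) ]
 *
 * leaves *length = 1024 after decode_write_chunk() returns.
 */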

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC:       %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC:       %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC:       %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock_bh(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock_bh(&xprt->transport_lock);
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
	queue_work(buf->rb_completion_wq, &rep->rr_work);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}
1380