// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

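/* Return the first recv_ctxt on @list, or NULL if @list is empty.
 * The ctxt is left on the list; callers dequeue it with list_del().
 */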
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

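/* Stamp @cid with the Receive CQ's resource ID and the next
 * per-transport completion serial number. Tracepoints use this
 * pair to identify individual Receive completions.
 */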
static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

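/* Allocate a recv_ctxt with a DMA-mapped Receive buffer of
 * sc_max_req_size bytes, and pre-build its Receive WR and SGE so
 * the ctxt can be posted without further preparation.
 *
 * Returns NULL if any allocation or the DMA mapping fails.
 */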
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

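/* Unmap @ctxt's Receive buffer and release all of its memory. */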
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

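/* Take a recv_ctxt from the transport's free list, or allocate a
 * fresh one if the free list is empty. Returns NULL only when
 * allocation fails.
 */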
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	ctxt->rc_read_payload_length = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

/**
 * svc_rdma_release_rqst - Release transport-specific per-rqst resources
 * @rqstp: svc_rqst being released
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	rqstp->rq_xprt_ctxt = NULL;
	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

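/* Post @ctxt's Receive WR on the transport's Receive Queue. If
 * posting fails, @ctxt is returned to the free list and a negative
 * errno is passed back to the caller.
 */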
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	trace_svcrdma_post_recv(ctxt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	if (ret)
		goto err_post;
	return 0;

err_post:
	trace_svcrdma_rq_post_err(rdma, ret);
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	return ret;
}

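/* Replenish the Receive Queue with one freshly-posted recv_ctxt.
 * Posting is skipped (and zero returned) when the transport is
 * being closed.
 */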
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return 0;
	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret)
			return false;
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

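/* Map the incoming message into rqstp::rq_arg. The head iovec covers
 * every byte that arrived in the Receive buffer (transport header
 * included); the page list and tail are empty at this point.
 */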
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk.
 */
#define MAX_BYTES_WRITE_CHUNK ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk.
 */
#define MAX_BYTES_SPECIAL_CHUNK ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Return values:
 *       %true: Read list is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Read list.
 *      %false: Read list is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 position, len;
	bool first;
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	len = 0;
	first = true;
	while (xdr_item_is_present(p)) {
		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		if (first) {
			position = be32_to_cpup(p);
			first = false;
		} else if (be32_to_cpup(p) != position) {
			return false;
		}
		p += 2;
		len += be32_to_cpup(p);

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return len <= MAX_BYTES_SPECIAL_CHUNK;
}

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen)
{
	u32 i, segcount, total;
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	segcount = be32_to_cpup(p);

	total = 0;
	for (i = 0; i < segcount; i++) {
		u32 handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_segment_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_rdma_segment(p, &handle, &length, &offset);
		trace_svcrdma_decode_wseg(handle, length, offset);

		total += length;
	}
	return total <= maxlen;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *       %true: Write list is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Write list.
 *      %false: Write list is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 chcount = 0;
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	rctxt->rc_write_list = p;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK))
			return false;
		++chcount;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	if (!chcount)
		rctxt->rc_write_list = NULL;
	return chcount < 2;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *       %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Reply chunk.
 *      %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	rctxt->rc_reply_chunk = NULL;
	if (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK))
			return false;
		rctxt->rc_reply_chunk = p;
	}
	return true;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 *
 * Perform this operation while the received transport header is
 * still in the CPU cache.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	__be32 inv_rkey, *p;
	u32 i, segcount;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = xdr_zero;
	p = ctxt->rc_recv_buf;
	p += rpcrdma_fixed_maxsz;

	/* Read list */
	while (xdr_item_is_present(p++)) {
		p++;	/* position */
		if (inv_rkey == xdr_zero)
			inv_rkey = *p;
		else if (inv_rkey != *p)
			return;
		p += 4;
	}

	/* Write list */
	while (xdr_item_is_present(p++)) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	/* Reply chunk */
	if (xdr_item_is_present(p++)) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}

/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, rq_arg->head[0].iov_base points to the first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	switch (*p) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

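/* Finish assembling the RPC Call message in the second svc_rqst.
 * Read chunk sink pages are moved from @head into @rqstp's page
 * array, and rq_arg's head, tail, and lengths are restored from
 * the xdr_buf saved in @head.
 */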
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

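/* Send a transport error response to the client, as selected by
 * @status. If no send_ctxt is available, the response is dropped.
 */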
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}