// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC Call
 * message is not yet complete.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, so any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

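/* Return the first recv_ctxt on @list, or NULL if @list is empty.
 * Callers must serialize access to @list themselves.
 */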
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

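/* Allocate a recv_ctxt along with a persistently DMA-mapped receive
 * buffer, and pre-build the Receive WR, CQE, and SGE so the ctxt can
 * be posted directly. Returns NULL on allocation or mapping failure.
 */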
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

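/* DMA-unmap and free the receive buffer, then free the recv_ctxt itself. */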
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

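/* Take a recv_ctxt from the transport's free list, or allocate a
 * fresh one if the free list is empty. Returns NULL only when
 * allocation fails.
 */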
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_recv_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma->sc_recv_lock);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_recv_lock);

	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp) {
		spin_lock(&rdma->sc_recv_lock);
		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
		spin_unlock(&rdma->sc_recv_lock);
	} else {
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

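/* Post one Receive WR. A reference on the svc_xprt is taken for each
 * posted Receive; svc_rdma_wc_receive() releases it when the WR
 * completes. On a posting failure, the ctxt is returned to the free
 * list and the reference is dropped here.
 */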
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

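/* Post a single replacement Receive WR, using a recv_ctxt from the
 * free list (or a freshly allocated one). Called from the Receive
 * completion handler to replace the just-consumed Receive buffer.
 */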
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret) {
			pr_err("svcrdma: failure posting recv buffers: %d\n",
			       ret);
			return false;
		}
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	spin_unlock(&rdma->sc_rq_dto_lock);
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

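/* Set up rqstp::rq_arg so that it points at the Receive buffer. The
 * entire incoming message, including the transport header, starts out
 * in the head iovec; the page list and tail are left empty.
 */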
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;

	rqstp->rq_respages = &rqstp->rq_pages[0];
	rqstp->rq_next_page = rqstp->rq_respages + 1;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there are enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}

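/* Finish constructing the RPC Call message in rqstp::rq_arg once the
 * RDMA Reads for this message have completed. The Read sink pages and
 * the saved xdr_buf head/tail are moved from @head into the second
 * svc_rqst, as described under "Page Management" above.
 */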
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

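/* Construct and post an RDMA_ERROR reply for a failed Call. An
 * err_vers body is sent when the client used an unsupported
 * RPC-over-RDMA protocol version; all other failures are reported
 * as err_chunk. The send ctxt is released if the post fails.
 */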
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}