// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

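/* Each Receive is stamped with a completion ID: the Receive CQ's
 * resource ID plus a per-transport sequence number. Trace points
 * report this ID so that a Receive completion can be matched to
 * the transport and queue it belongs to.
 */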
static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

/**
 * svc_rdma_release_rqst - Release transport-specific per-rqst resources
 * @rqstp: svc_rqst being released
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	rqstp->rq_xprt_ctxt = NULL;
	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted, bool temp)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	recv_chain = NULL;
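	/* Chain the Receive WRs together so the whole batch can be
	 * handed to the provider with a single ib_post_recv() call.
	 */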
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(ctxt);
		ctxt->rc_temp = temp;
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return false;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_post;
	return true;

err_post:
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}

	trace_svcrdma_rq_post_err(rdma, ret);
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs. */
	return false;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);

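	/* While the transport is not closing, keep the Receive queue
	 * stocked by posting further Receives in batches.
	 */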
	if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) &&
	    rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH,
					    false))
			goto post_err;

	return;

flushed:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
post_err:
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

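/* On the wire, each Read segment (RFC 8166) is five XDR words: a
 * position (the segment's byte offset into the RPC message), then an
 * R_key handle, a length, and a 64-bit remote offset. A position of
 * zero denotes a Position Zero Read chunk, which conveys the RPC
 * Call message itself rather than a data payload.
 */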
/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *	    to the first byte past the Read list. rc_read_pcl and
 *	    rc_call_pcl cl_count fields are set to the number of
 *	    Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *	    unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

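/* On the wire, a Write chunk (RFC 8166) is a segment count followed
 * by that many RDMA segments. Each segment is an R_key handle, a
 * length, and a 64-bit remote offset: four XDR words, which is what
 * rpcrdma_segment_maxsz accounts for below.
 */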
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* A bogus segcount causes this buffer overflow check to fail. */
	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *       %true: Write list is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Write list, and
 *		the number of Write chunks is in rc_write_pcl.cl_count.
 *      %false: Write list is corrupt. @rctxt's xdr_stream is left
 *		in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *       %true: Write list is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Write list.
 *      %false: Write list is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *       %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *		to point to the first byte past the Reply chunk.
 *      %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *		in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}

/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
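	/* The fixed portion of the header is four XDR words: xid,
	 * version, credit value, and procedure (message type). Skip
	 * the xid, verify the version, then record the message type.
	 */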
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* Check the RPC header's direction field. The fixed transport
	 * header is four XDR words and the three empty chunk lists add
	 * one word each, so the RPC header starts at word 7 and its
	 * call direction field is word 8.
	 */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
	percpu_counter_inc(&svcrdma_stat_recv);

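	/* Make the received bytes visible to the CPU before parsing
	 * the transport header.
	 */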
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl))
		goto out_readlist;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readlist:
	ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
	if (ret < 0)
		goto out_readfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}