// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet complete.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst.
 */
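
/* A simplified, illustrative sketch of the flow described above, seen
 * from the svc_recv caller's side (this is not the actual caller code;
 * error handling and many details are omitted):
 *
 *	len = svc_rdma_recvfrom(rqstp);
 *	if (len > 0) {
 *		// rqstp->rq_arg holds a complete RPC Call; dispatch it
 *	} else if (len == 0) {
 *		// nothing ready, or RDMA Reads were posted; svc_recv
 *		// calls svc_rdma_recvfrom again when "Data Ready" is
 *		// signaled for this transport
 *	} else {
 *		// transport error; the connection is being torn down
 *	}
 */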

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

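/* Return the first svc_rdma_recv_ctxt on @list, or NULL if @list is
 * empty. The ctxt is not removed from the list.
 */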
static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

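/* Initialize the completion ID in @cid: record the Receive CQ's
 * resource ID and assign the next per-transport completion ID, so
 * that completions for this ctxt can be distinguished in traces.
 */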
static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

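/* Allocate a recv_ctxt on the memory node closest to the device,
 * along with a DMA-mapped Receive buffer of sc_max_req_size bytes,
 * and pre-build the Receive WR that posts that buffer.
 * Returns NULL if allocation or DMA mapping fails.
 */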
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

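/* Unmap the Receive buffer from the device, then release the buffer
 * and the recv_ctxt itself.
 */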
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
}

/**
 * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
 * @xprt: the transport which owned the context
 * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
{
	struct svc_rdma_recv_ctxt *ctxt = vctxt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

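/* Post up to @wanted fresh Receive WRs on @rdma as a single chain.
 * Returns true if the whole chain was posted; returns false if the
 * transport is closing, no recv_ctxts could be obtained, or
 * ib_post_recv() fails.
 */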
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return false;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(ctxt);
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return false;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_free;
	return true;

err_free:
	trace_svcrdma_rq_post_err(rdma, ret);
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs. */
	return false;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;
	trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);

	/* If receive posting fails, the connection is about to be
	 * lost anyway. The server will not be able to send a reply
	 * for this RPC, and the client will retransmit this RPC
	 * anyway when it reconnects.
	 *
	 * Therefore we drop the Receive, even if status was SUCCESS,
	 * to reduce the likelihood of replayed requests once the
	 * client reconnects.
	 */
	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
			goto dropped;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
	else
		trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
dropped:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

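/* Point rqstp->rq_arg at the ingress RPC/RDMA message in the Receive
 * buffer. The whole message starts out in the head iovec; the page
 * list and tail iovec start out empty.
 */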
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *	    to the first byte past the Read list. rc_read_pcl and
 *	    rc_call_pcl cl_count fields are set to the number of
 *	    Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *	    unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle,
					&length, &offset);
		if (position) {
			if (position & 3)
				return false;
			++rctxt->rc_read_pcl.cl_count;
		} else {
			++rctxt->rc_call_pcl.cl_count;
		}

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

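/* Decode one Write chunk: read its segment count, range check it, and
 * advance the xdr_stream past the chunk's segment array. Returns false
 * if the count is implausibly large or the chunk would overflow the
 * Receive buffer.
 */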
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* Before trusting the segcount value enough to use it in
	 * a computation, perform a simple range check. This is an
	 * arbitrary but sensible limit (ie, not architectural).
	 */
	if (unlikely(segcount > RPCSVC_MAXPAGES))
		return false;

	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list, and
 *	    the number of Write chunks is in rc_write_pcl.cl_count.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Reply chunk.
 *  %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}

/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, @rq_arg's head[0].iov_base points to the first byte of
 * the RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to the first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

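/* Send a transport-level error response to the client describing,
 * via @status, why the incoming Call in @rctxt was rejected. If no
 * send_ctxt is available, the error response is simply not sent.
 */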
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* RPC call direction */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 * - If there are no Read chunks in this message, then finish
 *   assembling the Call message and return the number of bytes
 *   in the message.
 *
 * - If there are Read chunks in this message, post Read WRs to
 *   pull that payload. When the Read WRs complete, build the
 *   full message and return the number of bytes in it.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	/* Prevent svc_xprt_release() from releasing pages in rq_pages
	 * when returning 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	rqstp->rq_xprt_ctxt = NULL;

	ctxt = NULL;
	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (ctxt)
		list_del(&ctxt->rc_list);
	else
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	/* Unblock the transport for the next receive */
	svc_xprt_received(xprt);
	if (!ctxt)
		return 0;

	percpu_counter_inc(&svcrdma_stat_recv);
	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl)) {
		ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
		if (ret < 0)
			goto out_readfail;
	}

	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	set_bit(RQ_SECURE, &rqstp->rq_flags);
	return rqstp->rq_arg.len;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	svc_xprt_deferred_close(xprt);
	return -ENOTCONN;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}
