// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, so any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and are refilled before the
 * first svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
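/* To make the two-call flow above concrete, here is a simplified
 * sketch of how the generic svc_recv path drives this file. This is
 * illustrative pseudo-code only, not the actual svc_recv logic:
 *
 *	while (XPT_DATA is set on the svc_xprt) {
 *		len = svc_rdma_recvfrom(rqstp);
 *		if (len < 0)		// transport error
 *			break;
 *		if (len == 0)		// Reads posted, or queue empty
 *			continue;	// wait for the next "Data Ready"
 *		svc_process(rqstp);	// RPC Call complete, len bytes
 *	}
 */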
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}
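/* recv_ctxt lifecycle, as implemented below: contexts are recycled
 * through the lock-free sc_recv_ctxts llist by svc_rdma_recv_ctxt_get()
 * and svc_rdma_recv_ctxt_put(). Contexts marked rc_temp (the initial
 * set posted at accept time) are destroyed rather than recycled when
 * they are put, so the pool sizes itself to the workload's
 * steady-state demand.
 */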
/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	ctxt->rc_read_payload_length = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret)
			return false;
	}
	return true;
}
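/* Receive queue provisioning: svc_rdma_post_recvs() above primes the
 * queue with sc_max_requests Receive WRs, and svc_rdma_wc_receive()
 * below posts one replacement Receive for each successful completion.
 * The queue depth therefore stays near sc_max_requests for the life
 * of the connection; if a replacement cannot be posted, the
 * connection is closed.
 */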
/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk.
 */
#define MAX_BYTES_WRITE_CHUNK ((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk.
 */
#define MAX_BYTES_SPECIAL_CHUNK ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
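/* For reference while reading the xdr_check_* helpers below, an
 * RPC-over-RDMA version 1 transport header is laid out roughly like
 * this (each item is one XDR word unless noted; see RFC 8166):
 *
 *	xid, vers, credits, proc		- rpcrdma_fixed_maxsz words
 *	Read list:   { 1, position, handle, length, offset(2) } *, 0
 *	Write list:  { 1, segcount, { handle, length, offset(2) } * } *, 0
 *	Reply chunk: as one Write chunk, or 0 if absent
 *
 * Each helper consumes its list and leaves rc_stream pointing at the
 * first word of the next one.
 */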
/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *   %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	     in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 position, len;
	bool first;
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	len = 0;
	first = true;
	while (*p != xdr_zero) {
		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		if (first) {
			position = be32_to_cpup(p);
			first = false;
		} else if (be32_to_cpup(p) != position) {
			return false;
		}
		p += 2;
		len += be32_to_cpup(p);

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return len <= MAX_BYTES_SPECIAL_CHUNK;
}

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt, u32 maxlen)
{
	u32 i, segcount, total;
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	segcount = be32_to_cpup(p);

	total = 0;
	for (i = 0; i < segcount; i++) {
		u32 handle, length;
		u64 offset;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_segment_maxsz * sizeof(*p));
		if (!p)
			return false;

		handle = be32_to_cpup(p++);
		length = be32_to_cpup(p++);
		xdr_decode_hyper(p, &offset);
		trace_svcrdma_decode_wseg(handle, length, offset);

		total += length;
	}
	return total <= maxlen;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list.
 *   %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	     in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 chcount = 0;
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	rctxt->rc_write_list = p;
	while (*p != xdr_zero) {
		if (!xdr_check_write_chunk(rctxt, MAX_BYTES_WRITE_CHUNK))
			return false;
		++chcount;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	if (!chcount)
		rctxt->rc_write_list = NULL;
	return chcount < 2;
}
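/* A worked example (handle and offset values are illustrative only):
 * a Write list carrying one chunk of two 4KB segments arrives as the
 * words
 *
 *	1		- one more chunk follows
 *	2		- segcount
 *	R1, 4096, O1	- handle, length, offset (2 words)
 *	R2, 4096, O2
 *	0		- end of Write list
 *
 * xdr_check_write_chunk() accumulates total = 8192, which passes as
 * long as it does not exceed MAX_BYTES_WRITE_CHUNK.
 */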
/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Reply chunk.
 *   %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *	     in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	rctxt->rc_reply_chunk = p;
	if (*p != xdr_zero) {
		if (!xdr_check_write_chunk(rctxt, MAX_BYTES_SPECIAL_CHUNK))
			return false;
	} else {
		rctxt->rc_reply_chunk = NULL;
	}
	return true;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 *
 * Perform this operation while the received transport header is
 * still in the CPU cache.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	__be32 inv_rkey, *p;
	u32 i, segcount;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = xdr_zero;
	p = ctxt->rc_recv_buf;
	p += rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero) {
		p++;	/* position */
		if (inv_rkey == xdr_zero)
			inv_rkey = *p;
		else if (inv_rkey != *p)
			return;
		p += 4;
	}

	/* Write list */
	while (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	/* Reply chunk */
	if (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}
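/* A non-zero rc_inv_rkey is consumed by the Send path: when it is
 * set, the reply can be posted as Send With Invalidate so the
 * requester's R_key is invalidated without a separate round trip
 * (see the IB_WR_SEND_WITH_INV handling in svc_rdma_sendto.c).
 */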
/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (unlikely(!p))
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	switch (*p) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;
	case rdma_done:
		goto out_drop;
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}
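/* For illustration, the ERR_VERS reply built by svc_rdma_send_error()
 * below consists of seven XDR words (all but the error codes are
 * taken from the ingress header):
 *
 *	xid, vers, credits, rdma_error, err_vers, low, high
 *
 * where low and high advertise the supported version range; this
 * implementation supports only rpcrdma_version, so both bounds carry
 * the same value.
 */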
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = xdr_reserve_space(&ctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&ctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&ctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	ctxt->sc_send_wr.num_sge = 1;
	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ctxt->sc_sges[0].length = ctxt->sc_hdrbuf.len;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(xprt, ctxt);
}
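/* Note on the backchannel check that follows: because the chunk
 * lists are empty, every header word sits at a fixed offset.
 * rdma_resp + 3 is the RPC/RDMA proc field; the next three words are
 * the empty Read list, Write list, and Reply chunk; and the enclosed
 * RPC header (its XID, then its call direction) follows immediately.
 */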
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}
	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}
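/* The Read chunk path continues in svc_rdma_rw.c:
 * svc_rdma_recv_read_chunk() posts the RDMA Read WRs, and when they
 * all complete, the ctxt is queued on sc_read_complete_q and "Data
 * Ready" is raised again, re-entering svc_rdma_recvfrom() at the
 * rdma_read_complete() arm above.
 */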