// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
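/* For orientation, a condensed sketch of the flow described above
 * (illustrative only; see the functions below for the details):
 *
 *      svc_recv
 *        -> svc_rdma_recvfrom                (first call)
 *             no Read chunks: build rq_arg, return message length
 *             Read chunks:    post RDMA Reads, return 0
 *
 *      ... RDMA Read completions signal "Data Ready" ...
 *
 *      svc_recv                              (possibly a different svc_rqst)
 *        -> svc_rdma_recvfrom                (second call)
 *             -> rdma_read_complete
 *             return message length
 */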
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
        return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
                                        rc_list);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;
        dma_addr_t addr;
        void *buffer;

        ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                goto fail0;
        buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

        ctxt->rc_recv_wr.next = NULL;
        ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
        ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
        ctxt->rc_recv_wr.num_sge = 1;
        ctxt->rc_cqe.done = svc_rdma_wc_receive;
        ctxt->rc_recv_sge.addr = addr;
        ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
        ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
        ctxt->rc_recv_buf = buffer;
        ctxt->rc_temp = false;
        return ctxt;

fail2:
        kfree(buffer);
fail1:
        kfree(ctxt);
fail0:
        return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
                                       struct svc_rdma_recv_ctxt *ctxt)
{
        ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
                            ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
        kfree(ctxt->rc_recv_buf);
        kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_destroy(rdma, ctxt);
        }
}
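/* Fetch an unused recv_ctxt from the transport's free list, or
 * allocate a fresh one when the list is empty. Returns NULL only
 * when a fresh allocation fails. The returned ctxt always starts
 * with an empty page array (rc_page_count == 0).
 */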
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        spin_lock(&rdma->sc_recv_lock);
        ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
        if (!ctxt)
                goto out_empty;
        list_del(&ctxt->rc_list);
        spin_unlock(&rdma->sc_recv_lock);

out:
        ctxt->rc_page_count = 0;
        return ctxt;

out_empty:
        spin_unlock(&rdma->sc_recv_lock);

        ctxt = svc_rdma_recv_ctxt_alloc(rdma);
        if (!ctxt)
                return NULL;
        goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                            struct svc_rdma_recv_ctxt *ctxt)
{
        unsigned int i;

        for (i = 0; i < ctxt->rc_page_count; i++)
                put_page(ctxt->rc_pages[i]);

        if (!ctxt->rc_temp) {
                spin_lock(&rdma->sc_recv_lock);
                list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
                spin_unlock(&rdma->sc_recv_lock);
        } else
                svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
                                struct svc_rdma_recv_ctxt *ctxt)
{
        int ret;

        svc_xprt_get(&rdma->sc_xprt);
        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
        trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
        if (ret)
                goto err_post;
        return 0;

err_post:
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        svc_xprt_put(&rdma->sc_xprt);
        return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        ctxt = svc_rdma_recv_ctxt_get(rdma);
        if (!ctxt)
                return -ENOMEM;
        return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;
        unsigned int i;
        int ret;

        for (i = 0; i < rdma->sc_max_requests; i++) {
                ctxt = svc_rdma_recv_ctxt_get(rdma);
                if (!ctxt)
                        return false;
                ctxt->rc_temp = true;
                ret = __svc_rdma_post_recv(rdma, ctxt);
                if (ret)
                        return false;
        }
        return true;
}
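/* Note on Receive replenishment: svc_rdma_post_recvs() primes the
 * Receive Queue with sc_max_requests buffers, and svc_rdma_wc_receive()
 * below posts one replacement Receive before it processes each
 * completion. The net effect is that the number of posted Receives
 * stays roughly constant for the lifetime of the connection.
 */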
/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_recv_ctxt *ctxt;

        trace_svcrdma_wc_receive(wc);

        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

        if (wc->status != IB_WC_SUCCESS)
                goto flushed;

        if (svc_rdma_post_recv(rdma))
                goto post_err;

        /* All wc fields are now known to be valid */
        ctxt->rc_byte_len = wc->byte_len;
        ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
                                   ctxt->rc_recv_sge.addr,
                                   wc->byte_len, DMA_FROM_DEVICE);

        spin_lock(&rdma->sc_rq_dto_lock);
        list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
        /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
        set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
        spin_unlock(&rdma->sc_rq_dto_lock);
        if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
                svc_xprt_enqueue(&rdma->sc_xprt);
        goto out;

flushed:
post_err:
        svc_rdma_recv_ctxt_put(rdma, ctxt);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&rdma->sc_xprt);
out:
        svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_recv_ctxt *ctxt;

        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_put(rdma, ctxt);
        }
        while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
                list_del(&ctxt->rc_list);
                svc_rdma_recv_ctxt_put(rdma, ctxt);
        }
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
                                   struct svc_rdma_recv_ctxt *ctxt)
{
        struct xdr_buf *arg = &rqstp->rq_arg;

        arg->head[0].iov_base = ctxt->rc_recv_buf;
        arg->head[0].iov_len = ctxt->rc_byte_len;
        arg->tail[0].iov_base = NULL;
        arg->tail[0].iov_len = 0;
        arg->page_len = 0;
        arg->page_base = 0;
        arg->buflen = ctxt->rc_byte_len;
        arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
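/* For reference, the chunk-checking helpers below walk an
 * RPC-over-RDMA version 1 transport header laid out like this
 * (RFC 8166; shown as illustrative XDR, not kernel type names):
 *
 *      uint32 xid;
 *      uint32 vers;            // always rpcrdma_version
 *      uint32 credits;
 *      uint32 proc;            // rdma_msg, rdma_nomsg, ...
 *      Read list:   optional list of { position, handle, length, offset<64> }
 *      Write list:  optional list of { segcount,
 *                                      segcount * { handle, length, offset<64> } }
 *      Reply chunk: optional { segcount,
 *                              segcount * { handle, length, offset<64> } }
 *
 * Each optional item is preceded by a one-word discriminator:
 * a non-zero word means an item follows, xdr_zero ends the list.
 */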
/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
        u32 position;
        bool first;

        first = true;
        while (*p++ != xdr_zero) {
                if (first) {
                        position = be32_to_cpup(p++);
                        first = false;
                } else if (be32_to_cpup(p++) != position) {
                        return NULL;
                }
                p++;	/* handle */
                if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
                        return NULL;
                p += 2;	/* offset */

                if (p > end)
                        return NULL;
        }
        return p;
}

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
                                     u32 maxlen)
{
        u32 i, segcount;

        segcount = be32_to_cpup(p++);
        for (i = 0; i < segcount; i++) {
                p++;	/* handle */
                if (be32_to_cpup(p++) > maxlen)
                        return NULL;
                p += 2;	/* offset */

                if (p > end)
                        return NULL;
        }

        return p;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
        u32 chcount;

        chcount = 0;
        while (*p++ != xdr_zero) {
                p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
                if (!p)
                        return NULL;
                if (chcount++ > 1)
                        return NULL;
        }
        return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
        if (*p++ != xdr_zero) {
                p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
                if (!p)
                        return NULL;
        }
        return p;
}
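/* A concrete example of the encoding that xdr_check_write_list()
 * accepts: a Write list containing a single chunk of two segments
 * appears on the wire as (one 32-bit XDR word per item, offsets
 * occupying two words):
 *
 *      xdr_one                 // a chunk follows
 *      2                       // segment count
 *      <handle> <length> <offset-hi> <offset-lo>	// segment 0
 *      <handle> <length> <offset-hi> <offset-lo>	// segment 1
 *      xdr_zero                // end of Write list
 */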
/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 *
 * Perform this operation while the received transport header is
 * still in the CPU cache.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
                                  struct svc_rdma_recv_ctxt *ctxt)
{
        __be32 inv_rkey, *p;
        u32 i, segcount;

        ctxt->rc_inv_rkey = 0;

        if (!rdma->sc_snd_w_inv)
                return;

        inv_rkey = xdr_zero;
        p = ctxt->rc_recv_buf;
        p += rpcrdma_fixed_maxsz;

        /* Read list */
        while (*p++ != xdr_zero) {
                p++;	/* position */
                if (inv_rkey == xdr_zero)
                        inv_rkey = *p;
                else if (inv_rkey != *p)
                        return;
                p += 4;
        }

        /* Write list */
        while (*p++ != xdr_zero) {
                segcount = be32_to_cpup(p++);
                for (i = 0; i < segcount; i++) {
                        if (inv_rkey == xdr_zero)
                                inv_rkey = *p;
                        else if (inv_rkey != *p)
                                return;
                        p += 4;
                }
        }

        /* Reply chunk */
        if (*p++ != xdr_zero) {
                segcount = be32_to_cpup(p++);
                for (i = 0; i < segcount; i++) {
                        if (inv_rkey == xdr_zero)
                                inv_rkey = *p;
                        else if (inv_rkey != *p)
                                return;
                        p += 4;
                }
        }

        ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
        __be32 *p, *end, *rdma_argp;
        unsigned int hdr_len;

        /* Verify that there are enough bytes for header + something */
        if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
                goto out_short;

        rdma_argp = rq_arg->head[0].iov_base;
        if (*(rdma_argp + 1) != rpcrdma_version)
                goto out_version;

        switch (*(rdma_argp + 3)) {
        case rdma_msg:
                break;
        case rdma_nomsg:
                break;

        case rdma_done:
                goto out_drop;

        case rdma_error:
                goto out_drop;

        default:
                goto out_proc;
        }

        end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
        p = xdr_check_read_list(rdma_argp + 4, end);
        if (!p)
                goto out_inval;
        p = xdr_check_write_list(p, end);
        if (!p)
                goto out_inval;
        p = xdr_check_reply_chunk(p, end);
        if (!p)
                goto out_inval;
        if (p > end)
                goto out_inval;

        rq_arg->head[0].iov_base = p;
        hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
        rq_arg->head[0].iov_len -= hdr_len;
        rq_arg->len -= hdr_len;
        trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
        return hdr_len;

out_short:
        trace_svcrdma_decode_short(rq_arg->len);
        return -EINVAL;

out_version:
        trace_svcrdma_decode_badvers(rdma_argp);
        return -EPROTONOSUPPORT;

out_drop:
        trace_svcrdma_decode_drop(rdma_argp);
        return 0;

out_proc:
        trace_svcrdma_decode_badproc(rdma_argp);
        return -EINVAL;

out_inval:
        trace_svcrdma_decode_parse(rdma_argp);
        return -EINVAL;
}
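/* svc_rdma_xdr_decode_req() return values, as consumed by
 * svc_rdma_recvfrom() below:
 *
 *      > 0:	length of the transport header; processing continues
 *      = 0:	message is dropped silently (rdma_done, rdma_error)
 *      < 0:	an error reply is sent to the client
 */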
static void rdma_read_complete(struct svc_rqst *rqstp,
                               struct svc_rdma_recv_ctxt *head)
{
        int page_no;

        /* Move Read chunk pages to rqstp so that they will be released
         * when svc_process is done with them.
         */
        for (page_no = 0; page_no < head->rc_page_count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->rc_pages[page_no];
        }
        head->rc_page_count = 0;

        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
        rqstp->rq_arg.page_len = head->rc_arg.page_len;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->rc_arg.head[0];
        rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
        rqstp->rq_arg.len = head->rc_arg.len;
        rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
                                __be32 *rdma_argp, int status)
{
        struct svc_rdma_send_ctxt *ctxt;
        unsigned int length;
        __be32 *p;
        int ret;

        ctxt = svc_rdma_send_ctxt_get(xprt);
        if (!ctxt)
                return;

        p = ctxt->sc_xprt_buf;
        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = xprt->sc_fc_credits;
        *p++ = rdma_error;
        switch (status) {
        case -EPROTONOSUPPORT:
                *p++ = err_vers;
                *p++ = rpcrdma_version;
                *p++ = rpcrdma_version;
                trace_svcrdma_err_vers(*rdma_argp);
                break;
        default:
                *p++ = err_chunk;
                trace_svcrdma_err_chunk(*rdma_argp);
        }
        length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
        svc_rdma_sync_reply_hdr(xprt, ctxt, length);

        ctxt->sc_send_wr.opcode = IB_WR_SEND;
        ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
        if (ret)
                svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
                                          __be32 *rdma_resp)
{
        __be32 *p;

        if (!xprt->xpt_bc_xprt)
                return false;

        p = rdma_resp + 3;
        if (*p++ != rdma_msg)
                return false;

        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* XID sanity */
        if (*p++ != *rdma_resp)
                return false;
        /* call direction */
        if (*p == cpu_to_be32(RPC_CALL))
                return false;

        return true;
}
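/* For reference, the ERR_VERS reply built by svc_rdma_send_error()
 * above is laid out on the wire as (illustrative, one word each):
 *
 *      <xid> <vers> <credits> rdma_error err_vers <low-vers> <high-vers>
 *
 * The ERR_CHUNK variant replaces the final three words with a
 * single err_chunk word.
 */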
/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *ctxt;
        __be32 *p;
        int ret;

        spin_lock(&rdma_xprt->sc_rq_dto_lock);
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
        if (ctxt) {
                list_del(&ctxt->rc_list);
                spin_unlock(&rdma_xprt->sc_rq_dto_lock);
                rdma_read_complete(rqstp, ctxt);
                goto complete;
        }
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
        if (!ctxt) {
                /* No new incoming requests, terminate the loop */
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                spin_unlock(&rdma_xprt->sc_rq_dto_lock);
                return 0;
        }
        list_del(&ctxt->rc_list);
        spin_unlock(&rdma_xprt->sc_rq_dto_lock);

        atomic_inc(&rdma_stat_recv);

        svc_rdma_build_arg_xdr(rqstp, ctxt);

        /* Prevent svc_xprt_release from releasing pages in rq_pages
         * if we return 0 or an error.
         */
        rqstp->rq_respages = rqstp->rq_pages;
        rqstp->rq_next_page = rqstp->rq_respages;

        p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
        ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
        if (ret < 0)
                goto out_err;
        if (ret == 0)
                goto out_drop;
        rqstp->rq_xprt_hlen = ret;

        if (svc_rdma_is_backchannel_reply(xprt, p)) {
                ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
                                               &rqstp->rq_arg);
                svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
                return ret;
        }
        svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

        p += rpcrdma_fixed_maxsz;
        if (*p != xdr_zero)
                goto out_readchunk;

complete:
        rqstp->rq_xprt_ctxt = ctxt;
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        return rqstp->rq_arg.len;

out_readchunk:
        ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
        if (ret < 0)
                goto out_postfail;
        return 0;

out_err:
        svc_rdma_send_error(rdma_xprt, p, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;

out_postfail:
        if (ret == -EINVAL)
                svc_rdma_send_error(rdma_xprt, p, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return ret;

out_drop:
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;
}