// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet complete.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again.
 * This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled after the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

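/* Take a recv_ctxt from the transport's free list, or allocate a
 * fresh one if the free list is empty. Returns NULL only when
 * allocation or DMA mapping fails.
 */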
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_recv_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma->sc_recv_lock);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_recv_lock);

	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp) {
		spin_lock(&rdma->sc_recv_lock);
		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
		spin_unlock(&rdma->sc_recv_lock);
	} else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	struct ib_recv_wr *bad_recv_wr;
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret) {
			pr_err("svcrdma: failure posting recv buffers: %d\n",
			       ret);
			return false;
		}
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
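 *
 * On success, the handler posts a replacement Receive WR to keep the
 * Receive Queue provisioned, queues the filled buffer on sc_rq_dto_q,
 * and enqueues the transport so that svc_rdma_recvfrom picks it up.
 * On any failure, the recv_ctxt is released and the connection is
 * marked for closure.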
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	spin_unlock(&rdma->sc_rq_dto_lock);
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;

	rqstp->rq_respages = &rqstp->rq_pages[0];
	rqstp->rq_next_page = rqstp->rq_respages + 1;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
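 *
 * Each Read list entry on the wire is a list discriminator word
 * followed by a read segment: a Position (the XDR offset of the
 * chunk in the RPC message) and an RDMA segment (handle, length,
 * and an 8-byte offset), five XDR words in all. Every segment of
 * a chunk must carry the same Position, which the loop below
 * enforces.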
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
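 * - The fixed portion of the header is four XDR words: XID,
 *   version, the requested credit value, and the procedure
 *   (rdma_msg, rdma_nomsg, and so on); the chunk lists follow
 *   immediately at rdma_argp + 4.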
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there are enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail.
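	 *
	 * The first svc_rqst's xdr_buf was stashed in head->rc_arg
	 * while the RDMA Reads were in flight (see the Operation
	 * comment at the top of this file); copy its head, tail,
	 * and lengths into the second svc_rqst.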
	 */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
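 *
 * In the Read chunk case, ownership of the ctxt passes to the RDMA
 * Read machinery; once the Reads complete, the ctxt is queued on
 * sc_read_complete_q, and a subsequent call to svc_rdma_recvfrom
 * finishes the Call via rdma_read_complete().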
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}