// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2020, Oracle and/or its affiliates.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	return size;
}

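/* For orientation: on the wire, each plain segment is an HLOO --
 * a 32-bit handle, a 32-bit length, and a 64-bit offset, or four
 * XDR words (encode_rdma_segment() below reserves exactly
 * 4 * sizeof(__be32)). A Read segment adds a list discriminator and
 * a position word, for six XDR words (see encode_read_segment()).
 * The sizes computed here are worst-case budgets; most headers are
 * far smaller.
 */
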
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	return size;
}

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @ep: endpoint to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep)
{
	unsigned int maxsegs = ep->re_max_rdma_segs;

	ep->re_max_inline_send =
		ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->re_max_inline_recv =
		ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int count, remaining, offset;

	if (xdr->len > ep->re_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > ep->re_attr.cap.max_send_sge)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds the
 * inline threshold, the client must provide a write list or a reply
 * chunk for this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep->re_max_inline_recv;
}

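/* Illustrative example (device limits vary): on a device that
 * advertises max_send_sge = 6, rpcrdma_args_inline() above allows
 * an inline Send only while RPCRDMA_MIN_SEND_SGES plus one SGE per
 * page of the page list stays within 6. A payload that fits under
 * the inline byte threshold can thus still be forced into a Read
 * chunk by the device's SGE limit.
 */
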
/* Upper layers such as the ACL protocols are lazy about allocating
 * pages. For TCP, these pages can be allocated during receive
 * processing. Not true for RDMA, which must always provision
 * receive buffers up front.
 */
static noinline int
rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
{
	struct page **ppages;
	int len;

	len = buf->page_len;
	ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
	while (len > 0) {
		if (!*ppages)
			*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
		if (!*ppages)
			return -ENOBUFS;
		ppages++;
		len -= PAGE_SIZE;
	}

	return 0;
}

/* Convert @vec to a single SGL element.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	seg->mr_page = virt_to_page(vec->iov_base);
	seg->mr_offset = offset_in_page(vec->iov_base);
	seg->mr_len = vec->iov_len;
	++seg;
	++(*n);
	return seg;
}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		seg->mr_page = *ppages;
		seg->mr_offset = page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	if (type == rpcrdma_readch)
		goto out;

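	/* For a Read chunk, only the page list is conveyed via
	 * explicit RDMA; the tail iovec, if any, is transmitted
	 * inline with the Send instead (see rpcrdma_prepare_readch()
	 * below), so it is not registered here.
	 */
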
	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr->mr_handle, mr->mr_length, mr->mr_offset);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;	/* Item present */
	xdr_encode_read_segment(p, position, mr->mr_handle, mr->mr_length,
				mr->mr_offset);
	return 0;
}

static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
						 struct rpcrdma_req *req,
						 struct rpcrdma_mr_seg *seg,
						 int nsegs, bool writing,
						 struct rpcrdma_mr **mr)
{
	*mr = rpcrdma_mr_pop(&req->rl_free_mrs);
	if (!*mr) {
		*mr = rpcrdma_mr_get(r_xprt);
		if (!*mr)
			goto out_getmr_err;
		(*mr)->mr_req = req;
	}

	rpcrdma_mr_push(*mr, &req->rl_registered);
	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);

out_getmr_err:
	trace_xprtrdma_nomrs_err(r_xprt, req);
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	rpcrdma_mrs_refresh(r_xprt);
	return ERR_PTR(-EAGAIN);
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct rpc_rqst *rqst,
				    enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
		goto done;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

done:
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}

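/* A concrete (entirely hypothetical) instance of the Read list
 * encoding key above: one 8192-byte read chunk at XDR position 36,
 * registered as a single segment with handle 0x1234, would appear
 * on the wire as
 *
 *	1				; Read segment present
 *	36				; position
 *	0x1234, 8192, <offset64>	; HLOO
 *	0				; no more Read segments
 *
 * The handle, length, and position values are made up for
 * illustration only.
 */
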
/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct rpc_rqst *rqst,
				     enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_writech)
		goto done;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

done:
	if (xdr_stream_encode_item_absent(xdr) < 0)
		return -EMSGSIZE;
	return 0;
}

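/* A note on the trailing discriminator: the Write list is an XDR
 * list of counted arrays, so the xdr_stream_encode_item_absent()
 * at the "done" label above does double duty. It terminates the
 * list after a chunk has been encoded, and it encodes an empty
 * Write list when @wtype is not rpcrdma_writech. The Reply chunk
 * below is a single optional array rather than a list, which is
 * why no such terminator follows it.
 */
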
/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
				      struct rpcrdma_req *req,
				      struct rpc_rqst *rqst,
				      enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	if (wtype != rpcrdma_replych) {
		if (xdr_stream_encode_item_absent(xdr) < 0)
			return -EMSGSIZE;
		return 0;
	}

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (xdr_stream_encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

static void rpcrdma_sendctx_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);
	struct rpcrdma_rep *rep = req->rl_reply;

	rpcrdma_complete_rqst(rep);
	rep->rr_rxprt->rx_stats.reply_waits_for_send++;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
	struct ib_sge *sge;

	if (!sc->sc_unmap_count)
		return;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
				  DMA_TO_DEVICE);

	kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
}

/* The head iovec is straightforward, as it is usually already
 * DMA-mapped. Sync the content that has changed.
 */
static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		return false;

	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	return true;
}

/* If there is a page list present, DMA map and prepare an
 * SGE for each page to be sent.
 */
static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
				     struct xdr_buf *xdr)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	unsigned int page_base, len, remaining;
	struct page **ppages;
	struct ib_sge *sge;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		sge = &sc->sc_sges[req->rl_wr.num_sge++];
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
					    page_base, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
			goto out_mapping_err;

		sge->length = len;
		sge->lkey = rdmab_lkey(rb);

		sc->sc_unmap_count++;
		ppages++;
		remaining -= len;
		page_base = 0;
	}

	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* The tail iovec might not reside in the same page as the
 * head iovec.
 */
static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     unsigned int page_base, unsigned int len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct page *page = virt_to_page(xdr->tail[0].iov_base);

	sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
		goto out_mapping_err;

	sge->length = len;
	sge->lkey = rdmab_lkey(rb);
	++sc->sc_unmap_count;
	return true;

out_mapping_err:
	trace_xprtrdma_dma_maperr(sge->addr);
	return false;
}

/* Copy the tail to the end of the head buffer.
 */
static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned char *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len + xdr->page_len;
	memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
	r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
}

/* Copy pagelist content into the head buffer.
 */
static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req,
				    struct xdr_buf *xdr)
{
	unsigned int len, page_base, remaining;
	struct page **ppages;
	unsigned char *src, *dst;

	dst = (unsigned char *)xdr->head[0].iov_base;
	dst += xdr->head[0].iov_len;
	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		src = page_address(*ppages);
		src += page_base;
		len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
		memcpy(dst, src, len);
		r_xprt->rx_stats.pullup_copy_count += len;

		ppages++;
		dst += len;
		remaining -= len;
		page_base = 0;
	}
}

/* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
 * When the head, pagelist, and tail are small, a pull-up copy
 * is considerably less costly than DMA mapping the components
 * of @xdr.
 *
 * Assumptions:
 *  - the caller has already verified that the total length
 *    of the RPC Call body will fit into @rl_sendbuf.
 */
static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	if (unlikely(xdr->tail[0].iov_len))
		rpcrdma_pullup_tail_iov(r_xprt, req, xdr);

	if (unlikely(xdr->page_len))
		rpcrdma_pullup_pagelist(r_xprt, req, xdr);

	/* The whole RPC message resides in the head iovec now */
	return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
}

static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
					struct rpcrdma_req *req,
					struct xdr_buf *xdr)
{
	struct kvec *tail = &xdr->tail[0];

	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;
	if (xdr->page_len)
		if (!rpcrdma_prepare_pagelist(req, xdr))
			return false;
	if (tail->iov_len)
		if (!rpcrdma_prepare_tail_iov(req, xdr,
					      offset_in_page(tail->iov_base),
					      tail->iov_len))
			return false;

	if (req->rl_sendctx->sc_unmap_count)
		kref_get(&req->rl_kref);
	return true;
}

static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
				   struct rpcrdma_req *req,
				   struct xdr_buf *xdr)
{
	struct kvec *tail = &xdr->tail[0];

	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
		return false;

	/* If there is a Read chunk, the page list is handled
	 * via explicit RDMA, and thus is skipped here.
	 */

	if (tail->iov_len) {
		if (!rpcrdma_prepare_tail_iov(req, xdr,
					      offset_in_page(tail->iov_base),
					      tail->iov_len))
			return false;
		kref_get(&req->rl_kref);
	}

	return true;
}

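/* For orientation, how each chunk type maps onto a Send (a
 * descriptive summary; the switch in rpcrdma_prepare_send_sges()
 * below is authoritative):
 *
 *	rpcrdma_noch_pullup:	whole message copied into rl_sendbuf
 *	rpcrdma_noch_mapped:	head, page list, and tail DMA-mapped
 *				in place
 *	rpcrdma_readch:		head and tail sent inline; page list
 *				moved via the Read chunk
 *	rpcrdma_areadch:	only the transport header is sent;
 *				the whole message moves via a
 *				Position Zero Read chunk
 */
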
/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req, u32 hdrlen,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	int ret;

	ret = -EAGAIN;
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		goto out_nosc;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	kref_init(&req->rl_kref);
	req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
	req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
	req->rl_wr.num_sge = 0;
	req->rl_wr.opcode = IB_WR_SEND;

	rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);

	ret = -EIO;
	switch (rtype) {
	case rpcrdma_noch_pullup:
		if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_noch_mapped:
		if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_readch:
		if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
			goto out_unmap;
		break;
	case rpcrdma_areadch:
		break;
	default:
		goto out_unmap;
	}

	return 0;

out_unmap:
	rpcrdma_sendctx_unmap(req->rl_sendctx);
out_nosc:
	trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
	return ret;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	struct xdr_buf *buf = &rqst->rq_snd_buf;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
		ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
		if (ret)
			return ret;
	}

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = r_xprt->rx_buf.rb_max_requests;
	/* The fourth reserved word, the message type, is encoded
	 * below once the chunk types have been chosen.
	 */

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
				&rqst->rq_cred->cr_auth->au_flags);

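	/* Background, for readers unfamiliar with RPCSEC_GSS: with
	 * integrity or privacy services (e.g. krb5i/krb5p), the
	 * message body is checksummed or encrypted in flight, so the
	 * peer cannot scatter raw data items directly into the
	 * receive buffer; such flavors advertise
	 * RPCAUTH_AUTH_DATATOUCH.
	 */
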
	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
			rpcrdma_noch_pullup : rpcrdma_noch_mapped;
	} else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;
	ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
					buf, rtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(req, rtype, wtype);
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	r_xprt->rx_stats.failed_marshal_count++;
	frwr_reset(req);
	return ret;
}

static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
					 struct rpcrdma_buffer *buf,
					 u32 grant)
{
	buf->rb_credits = grant;
	xprt->cwnd = grant << RPC_CWNDSHIFT;
}

static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
	spin_unlock(&xprt->transport_lock);
}

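/* Credit accounting in brief (see RFC 8166): each Reply carries
 * the server's current credit grant, which bounds how many Calls
 * the client may have outstanding. The grant is surfaced to the
 * generic RPC layer as a congestion window; cwnd is kept in
 * fixed-point units, so a grant of N credits becomes
 * N << RPC_CWNDSHIFT above.
 */
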
/**
 * rpcrdma_reset_cwnd - Reset the xprt's congestion window
 * @r_xprt: controlling transport instance
 *
 * Prepare @r_xprt for the next connection by reinitializing
 * its credit grant to one (see RFC 8166, Section 3.3.3).
 */
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	spin_lock(&xprt->transport_lock);
	xprt->cong = 0;
	__rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
	spin_unlock(&xprt->transport_lock);
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	if (fixup_copy_count)
		trace_xprtrdma_fixup(rqst, fixup_copy_count);
	return fixup_copy_count;
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (xdr_item_is_present(p++))
		return false;
	if (xdr_item_is_present(p++))
		return false;
	if (xdr_item_is_present(p++))
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		return true;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	xdr_decode_rdma_segment(p, &handle, length, &offset);
	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(xdr_item_is_present(p)))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (xdr_item_is_absent(p))
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (xdr_item_is_present(p))
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		trace_xprtrdma_err_vers(rqst, p, p + 1);
		break;
	case err_chunk:
		trace_xprtrdma_err_chunk(rqst);
		break;
	default:
		trace_xprtrdma_err_unrecognized(rqst, p);
	}

	return -EIO;
}

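/* Note on rdma_error replies: an ERR_VERS body carries the lowest
 * and highest RPC-over-RDMA versions the responder supports (the
 * two XDR words decoded above), while ERR_CHUNK indicates the
 * responder could not process the Call's chunks. In either case
 * the Call cannot complete, hence the unconditional -EIO.
 */
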
/**
 * rpcrdma_unpin_rqst - Release rqst without completing it
 * @rep: RPC/RDMA Receive context
 *
 * This is done when a connection is lost so that a Reply
 * can be dropped and its matching Call can be subsequently
 * retransmitted on a new connection.
 */
void rpcrdma_unpin_rqst(struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &rep->rr_rxprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	req->rl_reply = NULL;
	rep->rr_rqst = NULL;

	spin_lock(&xprt->queue_lock);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
}

/**
 * rpcrdma_complete_rqst - Pass completed rqst back to RPC
 * @rep: RPC/RDMA Receive context
 *
 * Reconstruct the RPC reply and complete the transaction
 * while @rqst is still pinned to ensure the rep, rqst, and
 * rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

out_badheader:
	trace_xprtrdma_reply_hdr_err(rep);
	r_xprt->rx_stats.bad_reply_count++;
	rqst->rq_task->tk_status = status;
	status = 0;
	goto out;
}

static void rpcrdma_reply_done(struct kref *kref)
{
	struct rpcrdma_req *req =
		container_of(kref, struct rpcrdma_req, rl_kref);

	rpcrdma_complete_rqst(req->rl_reply);
}

/**
 * rpcrdma_reply_handler - Process received RPC/RDMA messages
 * @rep: Incoming rpcrdma_rep object to process
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Any data means we had a useful conversation, so
	 * then we don't need to delay the next reconnect.
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_ep->re_max_requests)
		credits = r_xprt->rx_ep->re_max_requests;
	rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
			   false);
	if (buf->rb_credits != credits)
		rpcrdma_update_cwnd(r_xprt, credits);

	req = rpcr_to_rdmar(rqst);
	if (unlikely(req->rl_reply))
		rpcrdma_rep_put(buf, req->rl_reply);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	trace_xprtrdma_reply(rqst->rq_task, rep, credits);

	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	if (!list_empty(&req->rl_registered))
		frwr_unmap_async(r_xprt, req);
		/* LocalInv completion will complete the RPC */
	else
		kref_put(&req->rl_kref, rpcrdma_reply_done);
	return;

out_badversion:
	trace_xprtrdma_reply_vers_err(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst_err(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short_err(rep);

out:
	rpcrdma_rep_put(buf, rep);
}