// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

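/* svc_rdma_send_ctxt_alloc - Allocate a Send completion context.
 *
 * The returned ctxt includes a buffer for the RPC-over-RDMA transport
 * header that is DMA-mapped once here and stays mapped until the ctxt
 * is destroyed. Returns a new send_ctxt, or NULL on allocation or DMA
 * mapping failure.
 */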
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

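/* Send Queue accounting: posting a WR through svc_rdma_send() consumes
 * one entry of sc_sq_avail; svc_rdma_wc_send() above returns that entry
 * and wakes any sender waiting in svc_rdma_send() for the Send Queue
 * to drain.
 */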
/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		trace_svcrdma_post_send(wr);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_put(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: remaining bytes of the payload left in the Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(__be32 *src,
					     struct svc_rdma_send_ctxt *sctxt,
					     unsigned int *remaining)
{
	__be32 *p;
	const size_t len = rpcrdma_segment_maxsz * sizeof(*p);
	u32 handle, length;
	u64 offset;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	xdr_decode_rdma_segment(src, &handle, &length, &offset);

	*p++ = cpu_to_be32(handle);
	if (*remaining < length) {
		/* segment only partly filled */
		length = *remaining;
		*remaining = 0;
	} else {
		/* entire segment was consumed */
		*remaining -= length;
	}
	*p++ = cpu_to_be32(length);
	xdr_encode_hyper(p, offset);

	trace_svcrdma_encode_wseg(handle, length, offset);
	return len;
}

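/* Each RDMA segment in a Write chunk is encoded as a 32-bit handle
 * (rkey), a 32-bit length, and a 64-bit offset (see RFC 8166).
 * svc_rdma_encode_write_segment() above copies the segment from the
 * Call header and rewrites only its length field.
 */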
/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @src: matching Write chunk in the RPC Call header
 * @sctxt: Send context for the RPC Reply
 * @remaining: size in bytes of the payload in the Write chunk
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(__be32 *src,
					   struct svc_rdma_send_ctxt *sctxt,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	ssize_t len, ret;

	len = 0;
	trace_svcrdma_encode_write_chunk(remaining);

	src++;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	nsegs = be32_to_cpup(src++);
	ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
	if (ret < 0)
		return -EMSGSIZE;
	len += ret;

	for (i = nsegs; i; i--) {
		ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
		if (ret < 0)
			return -EMSGSIZE;
		src += rpcrdma_segment_maxsz;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the first Write chunk
 *
 * The client provides a Write chunk list in the Call message. Fill
 * in the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_write_list(const struct svc_rdma_recv_ctxt *rctxt,
			   struct svc_rdma_send_ctxt *sctxt,
			   unsigned int length)
{
	ssize_t len, ret;

	ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
	if (ret < 0)
		return ret;
	len = ret;

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Assumptions:
 * - Reply can always fit in the client-provided Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t
svc_rdma_encode_reply_chunk(const struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
					   length);
}

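/* svc_rdma_dma_map_page - DMA map one page of the Reply for the Send WR.
 * The mapping is recorded in the SGE selected by the caller via
 * sc_cur_sge_no, and the Send WR's num_sge is bumped. Returns zero on
 * success, or -EIO if the page cannot be DMA mapped.
 */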
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *	%true if pull-up must be used
 *	%false otherwise
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    struct xdr_buf *xdr)
{
	int elements;

	/* For small messages, copying bytes is cheaper than DMA mapping.
	 */
	if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
		return true;

	/* Check whether the xdr_buf has more elements than can
	 * fit in a single RDMA Send.
	 */
	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!rctxt || !rctxt->rc_write_list) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}

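/* Example: assuming a device that supports only 4 Send SGEs, a large
 * un-chunked Reply with a head iovec, three payload pages, and a
 * non-empty tail counts 1 + 3 + 1 = 5 xdr elements. With one more SGE
 * reserved for the transport header, that exceeds the device limit,
 * so svc_rdma_pull_up_needed() returns true and the Reply is copied
 * into the transport header buffer below.
 */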
/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly, or the
 * message is small enough that copying is cheaper than DMA mapping.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Returns zero on success, or a negative errno on failure.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (rctxt && rctxt->rc_write_list) {
		u32 xdrpad;

		xdrpad = xdr_pad_size(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			/* advance to the next page, honoring the offset
			 * into the first page
			 */
			memcpy(dst, page_address(*ppages++) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
		}
	}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	sctxt->sc_sges[0].length += xdr->len;
	trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
	return 0;
}

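/* When no Write or Reply chunk constrains the message and pull-up is
 * not used, the SGE array built below is laid out as: sc_sges[0] for
 * the transport header (persistently mapped), sc_sges[1] for the
 * xdr_buf's head, one SGE per payload page, and finally one SGE for
 * the tail, if present.
 */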
/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added. The Send WR's num_sge field is set.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   struct xdr_buf *xdr)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (rctxt && rctxt->rc_reply_chunk)
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	++sctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, sctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (rctxt && rctxt->rc_write_list) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_pad_size(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
		++sctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, &sctxt->sc_send_wr))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *wr_lst = rctxt->rc_write_list;
	__be32 *rp_ch = rctxt->rc_reply_chunk;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err0;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rp_ch ? rdma_nomsg : rdma_msg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err0;
	if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
		if (ret < 0)
			goto err2;
		if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
			goto err0;
	} else {
		if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
			goto err0;
	}

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
	trace_svcrdma_send_err(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in the reply's xdr_buf
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}