// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
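
/* Return the first send_ctxt on @list, or NULL if @list is empty.
 * The ctxt is not removed from the list.
 */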
static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}
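
/* Assign a fresh completion ID to @cid so that tracepoints can
 * distinguish this Send from others posted to the same Send Queue.
 */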
static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}
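
/* Allocate a send_ctxt along with a transport header buffer that is
 * DMA-mapped once here and stays mapped until the ctxt is destroyed.
 * sc_sges[0] always refers to this header buffer.
 */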
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	init_completion(&ctxt->sc_done);
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	complete(&ctxt->sc_done);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	reinit_completion(&ctxt->sc_done);

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if (atomic_dec_return(&rdma->sc_sq_avail) < 0) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk,
						    &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}
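
/* Carries the transport and the send_ctxt into the per-element
 * DMA-mapping callbacks below.
 */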
struct svc_rdma_map_data {
	struct svcxprt_rdma *md_rdma;
	struct svc_rdma_send_ctxt *md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}
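
/* Running state for svc_rdma_xb_count_sges() and
 * svc_rdma_xb_linearize(): the copy destination pointer, the total
 * byte count, and the number of SGEs a non-pulled-up Send would need.
 */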
struct svc_rdma_pullup_data {
	u8 *pd_dest;
	unsigned int pd_length;
	unsigned int pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length = sctxt->sc_hdrbuf.len,
		.pd_num_sges = 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base,
		       xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base,
		       xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *   pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;
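
	/* The transport header and the pulled-up message now occupy
	 * one contiguous buffer, so the Send needs only the
	 * (already-mapped) header SGE.
	 */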
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma = rdma,
		.md_ctxt = sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static inline void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
					  struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	ret = svc_rdma_send(rdma, sctxt);
	if (ret < 0)
		return ret;

	ret = wait_for_completion_killable(&sctxt->sc_done);
	svc_rdma_send_ctxt_put(rdma, sctxt);
	return ret;
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. This function releases
 * it once the Send completes or fails.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;
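
	/* Map the reason for the failure onto an RPC-over-RDMA error
	 * code: ERR_VERS reports the range of protocol versions this
	 * server supports; everything else is reported as ERR_CHUNK.
	 */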
	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;

	wait_for_completion_killable(&sctxt->sc_done);

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err1;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto err2;
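
	/* Transport header fields: XID and version are echoed from
	 * the Call, followed by this connection's credit grant and
	 * the header type (rdma_nomsg when the entire Reply is
	 * conveyed via the Reply chunk).
	 */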
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err1;
	if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
		goto err1;
	if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
		goto err1;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}