// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain after sendto returns must
 * be detached from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */
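/* A concrete ordering sketch (illustrative, not part of the original
 * text): a Reply bearing one Write chunk with two segments posts
 *
 *	Write WR (seg 0) --> Write WR (seg 1) --> Send WR
 *
 * on this connection's Send Queue. A QP generates Send Queue
 * completions in posted order, so by the time svc_rdma_wc_send()
 * runs, both Write completions have already fired and their pages
 * have been DMA-unmapped; releasing the pages at Send completion is
 * therefore safe.
 */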
#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	int i;

	ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
			    GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	init_completion(&ctxt->sc_done);
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}
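/* The send_ctxt free list is kept on a lock-free llist. llist_add()
 * is safe against concurrent producers, which is why
 * svc_rdma_send_ctxt_put() below takes no lock. llist_del_first(),
 * however, must be serialized when there can be multiple consumers,
 * so svc_rdma_send_ctxt_get() removes entries under sc_send_lock.
 * Teardown (above) runs single-threaded and can pop entries without
 * the lock.
 */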
/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 *
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
	atomic_add(avail, &rdma->sc_sq_avail);
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
		wake_up(&rdma->sc_send_wait);
}
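/* A brief sketch of the Send Queue accounting protocol (derived from
 * the code in this file): sc_sq_avail counts free SQ entries. A
 * sender optimistically decrements it before posting; a negative
 * result means the SQ was full, so the sender restores the count and
 * sleeps on sc_send_wait. Each completion then calls
 * svc_rdma_wake_send_waiters() to credit its entry back and wake any
 * sleepers. For example, with sc_sq_avail == 0 a poster observes
 * atomic_dec_return() == -1 and must wait for a completion.
 */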
/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	svc_rdma_wake_send_waiters(rdma, 1);
	complete(&ctxt->sc_done);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		goto flushed;

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
	return;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
	else
		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	reinit_completion(&ctxt->sc_done);

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}
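/* The encoders below build the RPC-over-RDMA v1 transport header
 * that precedes the RPC message. Per RFC 8166, it is laid out as:
 *
 *	rdma_xid, rdma_vers, rdma_credit, rdma_proc,
 *	Read list, Write list, Reply chunk
 *
 * where each list is a sequence of (present-flag, item) pairs
 * terminated by an absent flag, and each segment within a chunk is
 * an (rdma_handle, rdma_length, rdma_offset) triple. (This summary
 * is editorial context, not part of the original comments.)
 */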
/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}
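/* Worked example (illustrative): a Write chunk with two segments
 * encodes as
 *
 *	1 (present) | 2 (segcount) | H0 L0 OF0 OF0 | H1 L1 OF1 OF1
 *
 * Each segment is rpcrdma_segment_maxsz (4) XDR words: a 32-bit
 * handle, a 32-bit length, and a 64-bit offset. The whole chunk
 * thus consumes (1 + 1 + 2 * 4) * 4 = 40 bytes of header space.
 */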
/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

struct svc_rdma_map_data {
	struct svcxprt_rdma		*md_rdma;
	struct svc_rdma_send_ctxt	*md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}
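/* Reminder for the iterators that follow (editorial context): a
 * struct xdr_buf carries an RPC message in three regions -- a head
 * kvec, an array of whole or partial pages, and a tail kvec -- so
 * every xdr_buf walker in this file handles head, page vector, and
 * tail in that order.
 */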
/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   On success, returns the number of bytes of @xdr that were
 *   DMA mapped
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

struct svc_rdma_pullup_data {
	u8		*pd_dest;
	unsigned int	pd_length;
	unsigned int	pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero. The number of SGEs needed to Send the contents
 *   of @xdr inline is accumulated in @data's pd_num_sges field.
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length	= sctxt->sc_hdrbuf.len,
		.pd_num_sges	= 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}
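/* Illustration (values hypothetical): suppose sc_max_send_sges is 4
 * and a Reply consists of the transport header, a head kvec, a
 * payload spanning two pages, and a tail kvec. That is 1 + 1 + 2 + 1
 * = 5 SGEs, so pd_num_sges >= sc_max_send_sges and the message must
 * be pulled up into the header buffer. Very short messages (under
 * RPCRDMA_PULLUP_THRESH bytes) are also pulled up, since one copy is
 * cheaper than DMA-mapping several tiny SGEs.
 */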
/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *   pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma	= rdma,
		.md_ctxt	= sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}
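/* Summary of the three Send layouts produced by
 * svc_rdma_map_reply_msg() above (informational):
 *
 *  - Reply chunk present: only sc_sges[0] (the transport header) is
 *    sent; the RPC message itself was already conveyed by RDMA Write.
 *  - Pull-up: the RPC message is copied into the header buffer, so
 *    again only sc_sges[0] is sent, now with a larger length.
 *  - Otherwise: the header is sc_sges[0] and each mapped region of
 *    the xdr_buf occupies one additional SGE.
 */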
793 */ 794 if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr)) 795 return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr); 796 797 return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr, 798 svc_rdma_xb_dma_map, &args); 799 } 800 801 /* Prepare the portion of the RPC Reply that will be transmitted 802 * via RDMA Send. The RPC-over-RDMA transport header is prepared 803 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges. 804 * 805 * Depending on whether a Write list or Reply chunk is present, 806 * the server may send all, a portion of, or none of the xdr_buf. 807 * In the latter case, only the transport header (sc_sges[0]) is 808 * transmitted. 809 * 810 * RDMA Send is the last step of transmitting an RPC reply. Pages 811 * involved in the earlier RDMA Writes are here transferred out 812 * of the rqstp and into the sctxt's page array. These pages are 813 * DMA unmapped by each Write completion, but the subsequent Send 814 * completion finally releases these pages. 815 * 816 * Assumptions: 817 * - The Reply's transport header will never be larger than a page. 818 */ 819 static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, 820 struct svc_rdma_send_ctxt *sctxt, 821 const struct svc_rdma_recv_ctxt *rctxt, 822 struct svc_rqst *rqstp) 823 { 824 int ret; 825 826 ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res); 827 if (ret < 0) 828 return ret; 829 830 if (rctxt->rc_inv_rkey) { 831 sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV; 832 sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey; 833 } else { 834 sctxt->sc_send_wr.opcode = IB_WR_SEND; 835 } 836 837 ret = svc_rdma_send(rdma, sctxt); 838 if (ret < 0) 839 return ret; 840 841 ret = wait_for_completion_killable(&sctxt->sc_done); 842 svc_rdma_send_ctxt_put(rdma, sctxt); 843 return ret; 844 } 845 846 /** 847 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response 848 * @rdma: controlling transport context 849 * @sctxt: Send context for the response 850 * @rctxt: Receive context for incoming bad message 851 * @status: negative errno indicating error that occurred 852 * 853 * Given the client-provided Read, Write, and Reply chunks, the 854 * server was not able to parse the Call or form a complete Reply. 855 * Return an RDMA_ERROR message so the client can retire the RPC 856 * transaction. 857 * 858 * The caller does not have to release @sctxt. It is released by 859 * Send completion, or by this function on error. 860 */ 861 void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, 862 struct svc_rdma_send_ctxt *sctxt, 863 struct svc_rdma_recv_ctxt *rctxt, 864 int status) 865 { 866 __be32 *rdma_argp = rctxt->rc_recv_buf; 867 __be32 *p; 868 869 rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0); 870 xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf, 871 sctxt->sc_xprt_buf, NULL); 872 873 p = xdr_reserve_space(&sctxt->sc_stream, 874 rpcrdma_fixed_maxsz * sizeof(*p)); 875 if (!p) 876 goto put_ctxt; 877 878 *p++ = *rdma_argp; 879 *p++ = *(rdma_argp + 1); 880 *p++ = rdma->sc_fc_credits; 881 *p = rdma_error; 882 883 switch (status) { 884 case -EPROTONOSUPPORT: 885 p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p)); 886 if (!p) 887 goto put_ctxt; 888 889 *p++ = err_vers; 890 *p++ = rpcrdma_version; 891 *p = rpcrdma_version; 892 trace_svcrdma_err_vers(*rdma_argp); 893 break; 894 default: 895 p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p)); 896 if (!p) 897 goto put_ctxt; 898 899 *p = err_chunk; 900 trace_svcrdma_err_chunk(*rdma_argp); 901 } 902 903 /* Remote Invalidation is skipped for simplicity. 
/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;

	wait_for_completion_killable(&sctxt->sc_done);

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *   %0 if an RPC reply has been successfully posted,
 *   %-ENOMEM if a resource shortage occurred (connection is lost),
 *   %-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	ret = -EMSGSIZE;
	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto reply_chunk;
	rc_size = ret;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;

	/* Prevent svc_xprt_release() from releasing the page backing
	 * rq_res.head[0].iov_base. It's no longer being accessed by
	 * the I/O device.
	 */
	rqstp->rq_respages++;
	return 0;

reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}
/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}