// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles, without page reference counting, the case where two
 * different Write segments convey portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	int i;

	ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
			    GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_send_ctxts)) != NULL) {
		ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	struct llist_node *node;

	spin_lock(&rdma->sc_send_lock);
	node = llist_del_first(&rdma->sc_send_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_send_ctxt, sc_node);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	if (ctxt->sc_page_count)
		release_pages(ctxt->sc_pages, ctxt->sc_page_count);

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
}
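
/* A rough sketch of the send_ctxt life cycle, as implemented above
 * and in svc_rdma_sendto() below:
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	... encode the transport header into sctxt->sc_stream ...
 *	... DMA-map or pull up the RPC message ...
 *	ret = svc_rdma_send(rdma, sctxt);
 *
 * When the post succeeds, the Send completion handler eventually
 * invokes svc_rdma_send_ctxt_put(). When the post fails, the
 * sending path must put the ctxt itself.
 */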

/**
 * svc_rdma_wake_send_waiters - manage Send Queue accounting
 * @rdma: controlling transport
 * @avail: Number of additional SQEs that are now available
 *
 */
void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail)
{
	atomic_add(avail, &rdma->sc_sq_avail);
	smp_mb__after_atomic();
	if (unlikely(waitqueue_active(&rdma->sc_send_wait)))
		wake_up(&rdma->sc_send_wait);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	svc_rdma_wake_send_waiters(rdma, 1);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		goto flushed;

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
	svc_rdma_send_ctxt_put(rdma, ctxt);
	return;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
	else
		trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
	svc_rdma_send_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}
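
/* A sketch of the XDR layout produced above for one Write chunk:
 *
 *	1			(chunk is present)
 *	ch_segcount		(number of segments)
 *	handle, length, offset	(one per segment, repeated)
 *
 * Each segment's length field carries the number of payload bytes
 * actually conveyed by that segment, not the size the client offered.
 */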

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

struct svc_rdma_map_data {
	struct svcxprt_rdma		*md_rdma;
	struct svc_rdma_send_ctxt	*md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles the DMA unmapping, and it uses ib_dma_unmap_page()
 * exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

struct svc_rdma_pullup_data {
	u8		*pd_dest;
	unsigned int	pd_length;
	unsigned int	pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length	= sctxt->sc_hdrbuf.len,
		.pd_num_sges	= 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}
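
/* For example (a sketch, not taken from a real device): a device
 * advertising only 3 Send SGEs cannot transmit a Reply that needs
 * one SGE for the transport header, one for the xdr_buf head, and
 * two for the page list, so the message is copied ("pulled up")
 * into the transport header buffer instead. Messages smaller than
 * RPCRDMA_PULLUP_THRESH are also pulled up, likely because a copy
 * is cheaper than DMA-mapping several small SGEs.
 */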

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *  pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma	= rdma,
		.md_ctxt	= sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	return svc_rdma_send(rdma, sctxt);
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}
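
/* A sketch of the two error bodies encoded above:
 *
 *	xid, vers, credits, rdma_error, err_vers, low, high
 *	xid, vers, credits, rdma_error, err_chunk
 *
 * where "low" and "high" are the lowest and highest protocol
 * versions the server supports (both rpcrdma_version here).
 */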

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *   %0 if an RPC reply has been successfully posted,
 *   %-ENOMEM if a resource shortage occurred (connection is lost),
 *   %-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	unsigned int rc_size;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	ret = -EMSGSIZE;
	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto reply_chunk;
	rc_size = ret;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	ret = svc_rdma_encode_read_list(sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_write_list(rctxt, sctxt);
	if (ret < 0)
		goto put_ctxt;
	ret = svc_rdma_encode_reply_chunk(rctxt, sctxt, rc_size);
	if (ret < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;
	return 0;

reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in @rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}