// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * Ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

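/* Editor's sketch of the forward path (not original commentary).
 * Roughly, svc_rdma_sendto() drives the helpers in this file in
 * this order:
 *
 *	sctxt = svc_rdma_send_ctxt_get(rdma);
 *	... encode the RPC-over-RDMA header into sctxt->sc_stream ...
 *	svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
 *	svc_rdma_save_io_pages(rqstp, sctxt);
 *	svc_rdma_send(rdma, sctxt);
 *
 * On success, the matching svc_rdma_send_ctxt_put() happens in the
 * Send completion handler; on a posting failure, the forward path
 * puts the ctxt itself.
 */
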
#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

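/* Editor's note (an observation about the code, not original
 * commentary): each svc_rdma_send_ctxt is allocated with its sc_sges[]
 * array appended in the same kmalloc, sized for sc_max_send_sges
 * entries. sc_sges[0] stays permanently DMA-mapped to the sc_xprt_buf
 * header buffer; only that header SGE is unmapped at teardown by
 * svc_rdma_send_ctxts_destroy(), while the payload SGEs are mapped
 * per-Send and unmapped in svc_rdma_send_ctxt_put().
 */
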
/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	might_sleep();

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

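/* Editor's sketch of the wire format (per RPC-over-RDMA version 1,
 * RFC 8166; not original commentary): each Write segment encoded above
 * occupies rpcrdma_segment_maxsz (4) XDR words:
 *
 *	handle (32 bits) | length (32 bits) | offset (64 bits)
 *
 * so a Write chunk with N segments consumes two XDR words (the
 * item-present discriminator and the segment count) plus N * 16 bytes
 * of Reply header space.
 */
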
/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

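/* Editor's note (not original commentary): when the client provided a
 * Reply chunk, the entire RPC Reply payload is moved by RDMA Write into
 * that chunk and the Send carries only the transport header, whose proc
 * field becomes rdma_nomsg. Without a Reply chunk, the header is
 * followed inline by the RPC message (rdma_msg). See svc_rdma_sendto()
 * and svc_rdma_map_reply_msg() below.
 */
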
struct svc_rdma_map_data {
	struct svcxprt_rdma		*md_rdma;
	struct svc_rdma_send_ctxt	*md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

struct svc_rdma_pullup_data {
	u8		*pd_dest;
	unsigned int	pd_length;
	unsigned int	pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length	= sctxt->sc_hdrbuf.len,
		.pd_num_sges	= 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *   pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

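/* Editor's summary of the pull-up decision above (not original
 * commentary): a reply whose header-plus-payload total is below
 * RPCRDMA_PULLUP_THRESH is copied into sc_xprt_buf and sent as a
 * single SGE, avoiding per-page DMA mapping of a small message.
 * A larger reply is sent directly from its xdr_buf pages, unless
 * that would require more SGEs than the device supports
 * (rdma->sc_max_send_sges), in which case pull-up is also used.
 */
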
/**
 * svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma	= rdma,
		.md_ctxt	= sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	svc_rdma_save_io_pages(rqstp, sctxt);

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	return svc_rdma_send(rdma, sctxt);
}

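/* Editor's note (an observation about the code above, not original
 * commentary): when the client has offered an R_key for Remote
 * Invalidation (rctxt->rc_inv_rkey is non-zero), the reply is posted
 * as IB_WR_SEND_WITH_INV so the adapter invalidates the client's MR
 * while delivering the reply, sparing the client a separate local
 * invalidation. Otherwise a plain IB_WR_SEND is used.
 */
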
/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

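/* Editor's sketch of the RDMA_ERROR messages built above (RFC 8166
 * terms; not original commentary):
 *
 *	xid | vers | credits | RDMA_ERROR | ERR_VERS  | low | high
 *	xid | vers | credits | RDMA_ERROR | ERR_CHUNK
 *
 * ERR_VERS reports the range of protocol versions the server supports
 * (here both bounds are rpcrdma_version); ERR_CHUNK reports that the
 * server could not parse or honor the Call's chunk lists.
 */
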
/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto err0;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto err0;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto err2;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto err0;
	if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
		goto err0;
	if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
		goto err0;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto err1;
	return 0;

err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	/* Send completion releases payload pages that were part
	 * of previously posted RDMA Writes.
	 */
	svc_rdma_save_io_pages(rqstp, sctxt);
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
	trace_svcrdma_send_err(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}