// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out_noctx;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}
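
/* Note: svc_rdma_rw_ctx_init() below wraps rdma_rw_ctx_init(), which
 * DMA-maps the context's scatterlist and, on devices that require it,
 * registers MRs for the transfer. Its positive return value is the
 * number of WQEs the resulting WR chain will consume; callers
 * accumulate that count into cc_sqecount for Send Queue accounting.
 */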

/**
 * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * Returns the number of WQEs that will be needed on the workqueue
 * on success, or a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (unlikely(ret < 0)) {
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}
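
/* A chunk context holds a reference on the transport's svc_xprt from
 * svc_rdma_cc_init() until svc_rdma_cc_release(), so the svcxprt_rdma
 * (and the QP these R/W contexts use) cannot be freed while the
 * chain's Work Requests may still be in flight.
 */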

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}
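
/* Send Queue accounting: svc_rdma_post_chunk_ctxt() reserves
 * cc_sqecount SQEs by decrementing sc_sq_avail before posting, and
 * sleeps on sc_send_wait when the Send Queue is oversubscribed. The
 * completion handlers above return those SQEs by adding cc_sqecount
 * back to sc_sq_avail and waking sc_send_wait.
 */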

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}
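
/* Each RDMA segment in a Write or Reply chunk is encoded as four XDR
 * words on the wire: a 32-bit handle (R_key), a 32-bit length, and a
 * 64-bit offset (rpcrdma_segment_maxsz). svc_rdma_build_writes() walks
 * these segments, issuing one rdma_rw_ctx for each portion of a
 * segment it fills; wi_seg_no and wi_seg_off persist across calls so
 * a Reply chunk can be filled piecewise (head, page list, then tail).
 */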

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			return -ENOMEM;

		constructor(info, write_len, ctxt);
		ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
					   DMA_TO_DEVICE);
		if (ret < 0)
			return -EIO;

		trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr,
				      unsigned int offset,
				      unsigned long length)
{
	info->wi_xdr = xdr;
	info->wi_next_off = offset - xdr->head[0].iov_len;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     length);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr,
			      unsigned int offset, unsigned long length)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!length)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_write_chunk(xdr->page_len);
	return length;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
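
/* A Reply chunk is provided when the client cannot accept the whole
 * RPC Reply inline. The server writes the Reply's head, its page list
 * (unless a Write chunk already carried that payload), and its tail
 * into the client-provided chunk; the matching Send is then expected
 * to carry only the transport header (see svc_rdma_sendto).
 */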

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_recv_ctxt *rctxt,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!rctxt->rc_write_list && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr,
						 xdr->head[0].iov_len,
						 xdr->page_len);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_reply_chunk(consumed);
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
				   DMA_FROM_DEVICE);
	if (ret < 0)
		return -EIO;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	unsigned int i;
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		trace_svcrdma_send_rseg(rs_handle, rs_length, rs_offset);
		info->ri_chunklen += rs_length;
	}

	/* Pages under I/O have been copied to head->rc_pages.
	 * Prevent their premature release by svc_xprt_release().
	 */
	for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = NULL;

	return ret;
}
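
/* A Read chunk's Position is the offset in the XDR stream where the
 * chunk's payload was reduced out of the inline message. For a chunk
 * with a non-zero Position, the inline Receive buffer is split at
 * that offset: bytes before it remain in head[0], bytes after it
 * become tail[0], and the pulled-over payload forms the page list in
 * between.
 */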

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

	head->rc_hdr_count = 0;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	head->rc_arg.tail[0].iov_base =
		head->rc_arg.head[0].iov_base + info->ri_position;
	head->rc_arg.tail[0].iov_len =
		head->rc_arg.head[0].iov_len - info->ri_position;
	head->rc_arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->rc_arg.page_len = info->ri_chunklen;
	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_pzr(info->ri_chunklen);

	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

	head->rc_hdr_count = 1;
	head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
	head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
					     info->ri_chunklen);

	head->rc_arg.page_len = info->ri_chunklen -
		head->rc_arg.head[0].iov_len;

out:
	return ret;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_recv_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	int ret;

	/* The request (with page list) is constructed in
	 * head->rc_arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
	if (ret < 0)
		goto out_err;
	return 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}