// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */
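
/* MR life cycle (an illustrative summary of the functions below):
 *
 *	frwr_mr_init       - allocate an ib_mr and scatterlist for one MR
 *	frwr_map           - build a REG_MR WR covering an RPC's segments
 *	frwr_send          - chain REG_MR WRs ahead of the RPC's Send WR
 *	frwr_reminv        - release an MR the server invalidated remotely
 *	frwr_unmap_sync /
 *	frwr_unmap_async   - post LOCAL_INV WRs to fence the remaining MRs
 *	frwr_release_mr    - destroy an MR that is no longer usable
 */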

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}
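
/* Worked example of the Send Queue sizing above (illustrative only,
 * assuming RPCRDMA_MAX_DATA_SEGS is 64): a device whose re_max_fr_depth
 * is 16 starts with delta = 48, so the loop adds two WRs three times and
 * depth becomes 7 + 6 = 13. max_send_wr is then re_max_requests * 13,
 * clamped to the device's max_qp_wr, plus the backchannel and drain WRs.
 */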

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

	/* Place the RPC's XID in the upper 32 bits of the MR's iova,
	 * so that each registration is identifiable with the RPC
	 * that owns it.
	 */
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}
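
/* Note on the LOCAL_INV completion handlers used below: every WR in an
 * invalidation chain starts out with frwr_wc_localinv as its handler;
 * only the last WR in the chain is switched to frwr_wc_localinv_wake
 * (synchronous case) or frwr_wc_localinv_done (asynchronous case),
 * since send queue ordering guarantees that the earlier WRs in the
 * chain have completed by the time the last one does.
 */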

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}