/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
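
/* For reference, a summary of the fr_state transitions implemented
 * below (derived from the code in this file):
 *
 *   FRMR_IS_INVALID -> FRMR_IS_VALID	frwr_op_map, before the FAST_REG
 *					WR is posted
 *   FRMR_IS_VALID -> FRMR_IS_INVALID	frwr_op_unmap or frwr_op_unmap_sync,
 *					before the LOCAL_INV WR is posted
 *   (any state)  -> FRMR_IS_STALE	send completion reports a flushed or
 *					failed FAST_REG/LOCAL_INV WR
 *   FRMR_IS_STALE -> FRMR_IS_INVALID	__frwr_recovery_worker replaces the
 *					underlying ib_mr
 */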

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND | WQ_MEM_RECLAIM)

int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    r.frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->r.frmr.fr_mr))
		goto out_fail;

	r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(r->r.frmr.fr_mr))
		goto out_fail;

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	r->r.frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC: %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
}

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	f->sg = kcalloc(depth, sizeof(*f->sg), GFP_KERNEL);
	if (!f->sg)
		goto out_list_err;

	sg_init_table(f->sg, depth);

	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, rc);
	kfree(r->r.frmr.sg);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2;	/* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}
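
/* Worked example of the send queue sizing above (all values are
 * illustrative, not taken from any particular device): with
 * RPCRDMA_MAX_DATA_SEGS == 64 and a device that supports only 16
 * pages per fast-register WR, delta starts at 48, the loop runs
 * three times, and depth = 7 + 3 * 2 = 13 send queue entries are
 * reserved per RPC request.
 */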

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}

/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs
 * to be reset.
 *
 * WARNING: Only wr_id and status are reliable at this point
 */
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_mw *r)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	if (wc->status == IB_WC_WR_FLUSH_ERR)
		dprintk("RPC: %s: frmr %p flushed\n", __func__, r);
	else
		pr_warn("RPC: %s: frmr %p error, status %s (%d)\n",
			__func__, r, ib_wc_status_msg(wc->status),
			wc->status);

	r->r.frmr.fr_state = FRMR_IS_STALE;
}

static void
frwr_sendcompletion(struct ib_wc *wc)
{
	struct rpcrdma_mw *r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	struct rpcrdma_frmr *f = &r->r.frmr;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		__frwr_sendcompletion_flush(wc, r);

	if (f->fr_waiter)
		complete(&f->fr_linv_done);
}

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->mw_sendcompletion = frwr_sendcompletion;
		r->r.frmr.fr_xprt = r_xprt;
	}

	return 0;
}
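
/* For illustration only: with RPCRDMA_MAX_DATA_SEGS == 64, a device
 * FRMR depth of 16 pages, and rb_max_requests == 32 (all assumed
 * values), the loop above creates (64 / 16 + 2) * 32 = 192 FRMRs.
 */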

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->r.frmr;
	frmr->fr_state = FRMR_IS_VALID;
	frmr->fr_waiter = false;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&frmr->sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&frmr->sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	frmr->sg_nents = i;

	dma_nents = ib_dma_map_sg(device, frmr->sg, frmr->sg_nents, direction);
	if (!dma_nents) {
		pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
		       __func__, frmr->sg, frmr->sg_nents);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
		       __func__, frmr->fr_mr, n, frmr->sg_nents);
		rc = n < 0 ? n : -EINVAL;
		goto out_senderr;
	}

	dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, frmr->sg_nents, mr->length);

	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->wr.wr_id = (uintptr_t)mw;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->mr_dir = direction;
	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = frmr->sg_nents;
	seg1->mr_len = mr->length;

	return frmr->sg_nents;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	ib_dma_unmap_sg(device, frmr->sg, dma_nents, direction);
	__frwr_queue_recovery(mw);
	return rc;
}
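
/* Build an unsignaled LOCAL_INV Work Request for one MR. The WRs
 * built here are chained together by frwr_op_unmap_sync and posted
 * with a single ib_post_send call; only the last WR in the chain is
 * signaled.
 */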
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->r.frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_waiter = false;
	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	invalidate_wr->wr_id = (unsigned long)(void *)mw;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

static void
__frwr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		 int rc)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->r.frmr;

	seg->rl_mw = NULL;

	ib_dma_unmap_sg(device, f->sg, f->sg_nents, seg->mr_dir);

	if (!rc)
		rpcrdma_put_mw(r_xprt, mw);
	else
		__frwr_queue_recovery(mw);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	int rc;

	dprintk("RPC: %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->r.frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_waiter = true;
	init_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		pr_warn("%s: ib_post_send failed %i\n", __func__, rc);

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		__frwr_dma_unmap(r_xprt, seg, rc);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_send_wr *invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC: %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	frmr->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &mw->r.frmr.fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	invalidate_wr->wr_id = (uintptr_t)mw;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	ib_dma_unmap_sg(ia->ri_device, frmr->sg, frmr->sg_nents, seg1->mr_dir);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map			= frwr_op_map,
	.ro_unmap_sync		= frwr_op_unmap_sync,
	.ro_unmap		= frwr_op_unmap,
	.ro_open		= frwr_op_open,
	.ro_maxpages		= frwr_op_maxpages,
	.ro_init		= frwr_op_init,
	.ro_destroy		= frwr_op_destroy,
	.ro_displayname		= "frwr",
};