// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_recv_buffer_put(rep);
}

static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
				      struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < ep->rep_inline_recv)
		ep->rep_inline_recv = rsize;
	if (wsize < ep->rep_inline_send)
		ep->rep_inline_send = wsize;

	rpcrdma_set_max_header_sizes(r_xprt);
}
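
/* Illustrative note (not from the original source): the peer's
 * advertised buffer sizes above can only lower the local
 * rep_inline_send/rep_inline_recv thresholds, never raise them.
 * With assumed values, for example:
 *
 *	local rep_inline_recv = 4096, peer cp_send_size decodes to 1024
 *		-> rep_inline_recv becomes 1024
 *	local rep_inline_send = 1024, peer cp_recv_size decodes to 4096
 *		-> rep_inline_send stays 1024
 */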

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_id->device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_cm_private(r_xprt, &event->param.conn);
		trace_xprtrdma_inline_thresh(r_xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_id->device->name, rdma_event_msg(event->event));
	return 0;
}
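
/* Note (not from the original source): address and route resolution
 * are asynchronous. rdma_resolve_addr() and rdma_resolve_route()
 * return immediately, and the CM event handler above signals ri_done
 * when the RESOLVED or error event arrives. The waiter below uses a
 * timeout one jiffy longer than RDMA_RESOLVE_TIMEOUT, presumably so
 * that under normal operation the CM, not the waiter, decides when
 * resolution has timed out and ri_async_rc reports the result.
 */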

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0)
		goto out;

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0)
		goto out;
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia->ri_id->device))
			break;
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	rpcrdma_reps_destroy(buf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(r_xprt);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	ep->rep_max_requests = xprt_rdma_slot_table_entries;
	ep->rep_inline_send = xprt_rdma_max_inline_write;
	ep->rep_inline_recv = xprt_rdma_max_inline_read;

	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	ep->rep_send_batch = ep->rep_max_requests >> 3;
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

	sendcq = ib_alloc_cq_any(ia->ri_id->device, r_xprt,
				 ep->rep_attr.cap.max_send_wr + 1,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
				 ep->rep_attr.cap.max_recv_wr + 1,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers are needed.
 */
static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
				    struct ib_qp_init_attr *qp_init_attr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(r_xprt);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}
	return 0;

out3:
	rpcrdma_ep_destroy(r_xprt);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rdma_cm_id *id, *old;
	int err, rc;

	rpcrdma_ep_disconnect(&r_xprt->rx_ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_id->device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, qp_init_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}
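
/* Note (not from the original source) on the paths through
 * rpcrdma_ep_connect() below: when rep_connected is zero this is the
 * transport's initial connect and only a QP needs to be created;
 * -ENODEV means the underlying device was removed, so the IA,
 * endpoint, and QP are all rebuilt via rpcrdma_ep_recreate_xprt();
 * any other value is an ordinary reconnect handled by
 * rpcrdma_ep_reconnect(), which reuses the existing PD and MRs on
 * the same device.
 */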

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct ib_qp_init_attr qp_init_attr;
	int rc;

retry:
	memcpy(&qp_init_attr, &ep->rep_attr, sizeof(qp_init_attr));
	switch (ep->rep_connected) {
	case 0:
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &qp_init_attr);
		if (rc) {
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, &qp_init_attr);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, &qp_init_attr);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_reset_cwnd(r_xprt);
	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc)
		goto out;

	if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
		xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	rpcrdma_mrs_create(r_xprt);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	trace_xprtrdma_connect(r_xprt, rc);
	return rc;
}

/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
	rpcrdma_reqs_reset(r_xprt);
	rpcrdma_mrs_destroy(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
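
/* Illustrative sketch of the queue invariants (not from the original
 * source): rb_sc_head is advanced only by the consumer in
 * rpcrdma_sendctx_get_locked(), and rb_sc_tail only by the producer
 * in rpcrdma_sendctx_put_locked(). The consumer treats the queue as
 * empty when advancing the head would collide with the tail:
 *
 *	next(head) == tail	-> no free sendctx, caller must wait
 *
 * The producer walks the tail forward one slot at a time until it
 * reaches the just-completed sendctx, unmapping the SGEs of any
 * unsignaled Sends it passes over along the way.
 */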

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			return -ENOMEM;

		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_NOFS);
		if (!mr)
			break;

		rc = frwr_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		spin_lock(&buf->rb_lock);
		rpcrdma_mr_push(mr, &buf->rb_mrs);
		list_add(&mr->mr_all, &buf->rb_all_mrs);
		spin_unlock(&buf->rb_lock);
	}

	r_xprt->rx_stats.mrs_allocated += count;
	trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
	xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	/* If there is no underlying device, it's no use to
	 * wake the refresh worker.
	 */
	if (ep->rep_connected != -ENODEV) {
		/* The work is scheduled on a WQ_MEM_RECLAIM
		 * workqueue in order to prevent MR allocation
		 * from recursing into NFS during direct reclaim.
		 */
		queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
	}
}
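
/* A worked example of the header buffer sizing in rpcrdma_req_create()
 * below, with an assumed ri_max_segs of 8 (illustrative only):
 *
 *	maxhdrsize = rpcrdma_fixed_maxsz + 3 + 8 * rpcrdma_readchunk_maxsz
 *
 * XDR words, converted to bytes by multiplying by sizeof(__be32),
 * then rounded up to the next power of two before the regbuf is
 * allocated. The extra 3 words presumably leave room for the chunk
 * list discriminators.
 */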

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t maxhdrsize;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	/* Compute maximum header buffer size in bytes */
	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
		     r_xprt->rx_ia.ri_max_segs * rpcrdma_readchunk_maxsz;
	maxhdrsize *= sizeof(__be32);
	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
				  DMA_TO_DEVICE, flags);
	if (!rb)
		goto out2;
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out3;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out4;

	INIT_LIST_HEAD(&req->rl_free_mrs);
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out4:
	kfree(req->rl_sendbuf);
out3:
	kfree(req->rl_rdmabuf);
out2:
	kfree(req);
out1:
	return NULL;
}

/**
 * rpcrdma_reqs_reset - Reset all reqs owned by a transport
 * @r_xprt: controlling transport instance
 *
 * ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. Eg. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		/* Credits are valid only for one connection */
		req->rl_slot.rq_cong = 0;
	}
}

static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
					      bool temp)
{
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;

	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;
	return rep;

out_free:
	kfree(rep);
out:
	return NULL;
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
	struct llist_node *node;

	/* Calls to llist_del_first are required to be serialized */
	node = llist_del_first(&buf->rb_free_reps);
	if (!node)
		return NULL;
	return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
			    struct rpcrdma_rep *rep)
{
	if (!rep->rr_temp)
		llist_add(&rep->rr_node, &buf->rb_free_reps);
	else
		rpcrdma_rep_destroy(rep);
}

static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
		rpcrdma_rep_destroy(rep);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all_mrs);
	INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);

	rc = -ENOMEM;
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	init_llist_head(&buf->rb_free_reps);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_all_reqs safely.
 */
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	list_del(&req->rl_all);

	while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
		struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;

		spin_lock(&buf->rb_lock);
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);
	}

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}
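
/* Note (not from the original source) on the locking pattern in
 * rpcrdma_mrs_destroy() below: rb_lock is dropped around each
 * frwr_release_mr() call, presumably because releasing an MR can
 * sleep and a spinlock must not be held across a sleeping operation.
 * The walk therefore restarts from the head of rb_all_mrs after
 * every release.
 */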

/**
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * @r_xprt: controlling transport instance
 *
 * Relies on caller holding the transport send lock to protect
 * removing mr->mr_list from req->rl_free_mrs safely.
 */
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	cancel_work_sync(&buf->rb_refresh_worker);

	spin_lock(&buf->rb_lock);
	while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
					      struct rpcrdma_mr,
					      mr_all)) != NULL) {
		list_del(&mr->mr_list);
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);

		spin_lock(&buf->rb_lock);
	}
	spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	rpcrdma_sendctxs_destroy(buf);
	rpcrdma_reps_destroy(buf);

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_lock);
	mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_lock);
	return mr;
}

/**
 * rpcrdma_mr_put - DMA unmap an MR and release it
 * @mr: MR to release
 *
 */
void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	if (req->rl_reply)
		rpcrdma_rep_put(buffers, req->rl_reply);
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ia.ri_id->device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
	return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}
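
/* Note (not from the original source) on Send completion signaling in
 * rpcrdma_ep_post() below: most Send WRs are posted unsignaled to
 * reduce completion overhead. A Send is signaled when rep_send_count
 * has counted down a full batch (rep_send_batch, set in
 * rpcrdma_ep_create()), or when the req is still referenced elsewhere
 * (kref > 1), presumably so that the sendctx queue's tail can advance
 * and rpcrdma_sendctx_put_locked() can unmap the SGEs of the
 * intervening unsignaled Sends.
 */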

/**
 * rpcrdma_ep_post - Post WRs to a transport's Send Queue
 * @ia: transport's device information
 * @ep: transport's RDMA endpoint information
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_wr;
	int rc;

	if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = frwr_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @temp: mark Receive buffers to be deleted after use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct ib_recv_wr *i, *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int needed, count, rc;

	rc = 0;
	count = 0;

	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (likely(ep->rep_receive_count > needed))
		goto out;
	needed -= ep->rep_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	while (needed) {
		rep = rpcrdma_rep_get_locked(buf);
		if (!rep)
			rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;

		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
	}
	if (!wr)
		goto out;

	for (i = wr; i; i = i->next) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);

		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
			goto release_wrs;

		trace_xprtrdma_post_recv(rep);
		++count;
	}

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
	if (rc) {
		for (wr = bad_wr; wr;) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			wr = wr->next;
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->rep_receive_count += count;
	return;

release_wrs:
	for (i = wr; i;) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
		i = i->next;
		rpcrdma_recv_buffer_put(rep);
	}
}