// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(ia->ri_id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(ia->ri_id->qp);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep.rep_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_post_recvs(r_xprt, false);
	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_recv_buffer_put(rep);
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < r_xprt->rx_ep.rep_inline_recv)
		r_xprt->rx_ep.rep_inline_recv = rsize;
	if (wsize < r_xprt->rx_ep.rep_inline_send)
		r_xprt->rx_ep.rep_inline_send = wsize;
	dprintk("RPC: %s: max send %u, max recv %u\n", __func__,
		r_xprt->rx_ep.rep_inline_send,
		r_xprt->rx_ep.rep_inline_recv);
	rpcrdma_set_max_header_sizes(r_xprt);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_id->device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: %s:%s on %s/frwr: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_id->device->name, rdma_event_msg(event->event));
	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
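	/* rc < 0 means the wait was interrupted. A timeout (rc == 0)
	 * is caught just below via ri_async_rc, which was preset to
	 * -ETIMEDOUT before rdma_resolve_route() was called.
	 */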
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia->ri_id->device))
			break;
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		rpcrdma_xprt_drain(r_xprt);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
		rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
		rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/**
 * rpcrdma_ep_create - Create unconnected endpoint
 * @r_xprt: transport to instantiate
 *
 * Returns zero on success, or a negative errno.
 */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	ep->rep_max_requests = xprt_rdma_slot_table_entries;
	ep->rep_inline_send = xprt_rdma_max_inline_write;
	ep->rep_inline_recv = xprt_rdma_max_inline_read;

	max_sge = min_t(unsigned int, ia->ri_id->device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = frwr_open(ia, ep);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	ep->rep_send_batch = ep->rep_max_requests >> 3;
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	ep->rep_receive_count = 0;

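	/* The completion queues are sized to hold every Send or Receive
	 * WR that could be outstanding at once, plus one slot of slack.
	 * When the device provides more than one completion vector, the
	 * Send CQ is placed on vector 1 so Send and Receive completion
	 * handling do not compete for the same vector.
	 */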
	sendcq = ib_alloc_cq(ia->ri_id->device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0,
			     IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_id->device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->rep_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->rep_inline_recv);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_id->device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/**
 * rpcrdma_ep_destroy - Disconnect and destroy endpoint.
 * @r_xprt: transport instance to shut down
 *
 */
void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(r_xprt);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(r_xprt);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_id->device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err)
		goto out_destroy;

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc)
		goto out;

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC: %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/**
 * rpcrdma_ep_disconnect - Disconnect underlying transport
 * @ep: endpoint to disconnect
 * @ia: associated interface adapter
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
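 *
 * The connection is drained before this function returns: any
 * outstanding Receives and Sends have completed or been flushed.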
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);
	int rc;

	/* returns without wait if ID is not connected */
	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			return -ENOMEM;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
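/* For example, with rb_sc_last == 3, rpcrdma_sendctx_next() below
 * advances the index 0 -> 1 -> 2 -> 3 -> 0 -> ..., so rb_sc_ctxs[]
 * is consumed as a ring without a power-of-two size or a modulus.
 */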
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	xprt_write_space(&sc->sc_xprt->rx_xprt);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = frwr_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
	xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	rb = rpcrdma_regbuf_alloc(RPCRDMA_HDRBUF_SIZE, DMA_TO_DEVICE, flags);
	if (!rb)
		goto out2;
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out3;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out4;

	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out4:
	kfree(req->rl_sendbuf);
out3:
	kfree(req->rl_rdmabuf);
out2:
	kfree(req);
out1:
	return NULL;
}

static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
					      bool temp)
{
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep.rep_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;

	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;
	return rep;

out_free:
	kfree(rep);
out:
	return NULL;
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_ep.rep_max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);

	rc = -ENOMEM;
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_credits = 1;
	INIT_LIST_HEAD(&buf->rb_recv_bufs);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * This function assumes that the caller prevents concurrent device
 * unload and transport tear-down.
 */
void
rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	list_del(&req->rl_all);

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);

		/* Ensure MW is not on any rl_registered list */
		if (!list_empty(&mr->mr_list))
			list_del(&mr->mr_list);

		frwr_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC: %s: released %u MRs\n", __func__, count);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = list_first_entry(&buf->rb_recv_bufs,
				       struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		rpcrdma_rep_destroy(rep);
	}

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}

	rpcrdma_mrs_destroy(buf);
}
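
/* A rough sketch of the object lifecycle, as suggested by the helpers
 * in this file: rpcrdma_buffer_create() builds the req/rep/MR pools at
 * transport setup; rpcrdma_buffer_get() and rpcrdma_buffer_put() hand a
 * req out and back for each RPC; rpcrdma_mr_get() and rpcrdma_mr_put()
 * do the same for MRs; rpcrdma_buffer_destroy() releases everything
 * once the transport has been drained.
 */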

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	trace_xprtrdma_nomrs(r_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		if (!rep->rr_temp) {
			list_add(&rep->rr_list, &buffers->rb_recv_bufs);
			rep = NULL;
		}
	}
	spin_unlock(&buffers->rb_lock);
	if (rep)
		rpcrdma_rep_destroy(rep);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	if (!rep->rr_temp) {
		spin_lock(&buffers->rb_lock);
		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
		spin_unlock(&buffers->rb_lock);
	} else {
		rpcrdma_rep_destroy(rep);
	}
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ia.ri_id->device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ia.ri_pd->local_dma_lkey;
	return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}

/**
 * rpcrdma_ep_post - Post WRs to a transport's Send Queue
 * @ia: transport's device information
 * @ep: transport's RDMA endpoint information
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
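 *
 * Most Sends are posted unsignaled: IB_SEND_SIGNALED is set only when
 * the batch counter (rep_send_count) is exhausted or @req still has
 * other references, so that completed sendctxs can be reclaimed in
 * batches by rpcrdma_sendctx_put_locked().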
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

	if (!ep->rep_send_count || kref_read(&req->rl_kref) > 1) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = frwr_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}

static void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct ib_recv_wr *i, *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int needed, count, rc;

	rc = 0;
	count = 0;

	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (ep->rep_receive_count > needed)
		goto out;
	needed -= ep->rep_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	spin_lock(&buf->rb_lock);
	while (needed) {
		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
					       struct rpcrdma_rep, rr_list);
		if (!rep)
			break;

		list_del(&rep->rr_list);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
	}
	spin_unlock(&buf->rb_lock);

	while (needed) {
		rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;

		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
	}
	if (!wr)
		goto out;

	for (i = wr; i; i = i->next) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);

		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
			goto release_wrs;

		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
		++count;
	}

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
	if (rc) {
		for (wr = bad_wr; wr;) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			wr = wr->next;
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->rep_receive_count += count;
	return;

release_wrs:
	for (i = wr; i;) {
		rep = container_of(i, struct rpcrdma_rep, rr_recv_wr);
		i = i->next;
		rpcrdma_recv_buffer_put(rep);
	}
}