/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
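 *
 * For example, xprt_rdma_connect() below is invoked through the
 * transport switch, while rpcrdma_ep_connect(), which it ultimately
 * calls, is internal to the RPC RDMA implementation.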
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 1;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

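	/* "%pi6" renders the address as 32 contiguous hex digits,
	 * which fits buf[40] with room for the terminating NUL */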
	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc = 0;

	xprt_clear_connected(xprt);

	dprintk("RPC: %s: %sconnect\n", __func__,
		r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}

static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
						   rx_xprt);

	pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
	rdma_disconnect(r_xprt->rx_ia.ri_id);
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
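 * A single xprt_free() therefore releases the whole structure once
 * the endpoint, buffers, and IA have been torn down.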
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	xprt_clear_connected(xprt);

	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}

static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr *sap;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			  xprt_rdma_slot_table_entries,
			  xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;	/* privileged port not needed */
	xprt->tsh_size = 0;	/* RPC-RDMA handles framing */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	sap = (struct sockaddr *)&cdata.addr;
	memcpy(sap, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, sap, xprt->addrlen);

	if (rpc_get_port(sap))
		xprt_set_bound(xprt);

	cdata.max_requests = xprt->max_reqs;

	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
			       &new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
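	 * The padding amount is taken from the rdma_inline_write_padding
	 * sysctl (cdata.padding, set above).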
	 */
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt_rdma_format_addresses(xprt, sap);
	xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
	if (xprt->max_payload == 0)
		goto out4;
	xprt->max_payload <<= PAGE_SHIFT;
	dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
		__func__, xprt->max_payload);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	dprintk("RPC: %s: %s:%s\n", __func__,
		xprt->address_strings[RPC_DISPLAY_ADDR],
		xprt->address_strings[RPC_DISPLAY_PORT]);
	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}

static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rx_connect_worker,
				      xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
		else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	} else {
		schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rx_connect_worker);
	}
}

/* Allocate a fixed-size buffer in which to construct and send the
 * RPC-over-RDMA header for this request.
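 * The buffer is RPCRDMA_HDRBUF_SIZE bytes and is DMA-mapped for
 * sending; it is allocated once per rpcrdma_req and then reused.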
 */
static bool
rpcrdma_get_rdmabuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		    gfp_t flags)
{
	size_t size = RPCRDMA_HDRBUF_SIZE;
	struct rpcrdma_regbuf *rb;

	if (req->rl_rdmabuf)
		return true;

	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
	if (IS_ERR(rb))
		return false;

	r_xprt->rx_stats.hardway_register_count += size;
	req->rl_rdmabuf = rb;
	return true;
}

static bool
rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		    size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	if (req->rl_sendbuf && rdmab_length(req->rl_sendbuf) >= size)
		return true;

	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
	if (IS_ERR(rb))
		return false;

	rpcrdma_free_regbuf(req->rl_sendbuf);
	r_xprt->rx_stats.hardway_register_count += size;
	req->rl_sendbuf = rb;
	return true;
}

/* The rq_rcv_buf is used only if a Reply chunk is necessary.
 * The decision to use a Reply chunk is made later in
 * rpcrdma_marshal_req. This buffer is registered at that time.
 *
 * Otherwise, the associated RPC Reply arrives in a separate
 * Receive buffer, arbitrarily chosen by the HCA. The buffer
 * allocated here for the RPC Reply is not utilized in that
 * case. See rpcrdma_inline_fixup.
 *
 * A regbuf is used here to remember the buffer size.
 */
static bool
rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		    size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	if (req->rl_recvbuf && rdmab_length(req->rl_recvbuf) >= size)
		return true;

	rb = rpcrdma_alloc_regbuf(size, DMA_NONE, flags);
	if (IS_ERR(rb))
		return false;

	rpcrdma_free_regbuf(req->rl_recvbuf);
	r_xprt->rx_stats.hardway_register_count += size;
	req->rl_recvbuf = rb;
	return true;
}

/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *        0:	Success; rq_buffer points to RPC buffer to use
 *   ENOMEM:	Out of memory, call again later
 *      EIO:	A permanent error occurred, do not retry
 *
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual
 * send/recv sequence.
 *
 * xprt_rdma_allocate provides buffers that are already mapped for
 * DMA, and a local DMA lkey is provided for each.
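 *
 * When the task is allocating on behalf of a swapper, GFP_NOWAIT |
 * __GFP_MEMALLOC is used so the allocation does not sleep waiting
 * on memory reclaim.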
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req;
	gfp_t flags;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		return -ENOMEM;

	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (!rpcrdma_get_rdmabuf(r_xprt, req, flags))
		goto out_fail;
	if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags))
		goto out_fail;
	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
		goto out_fail;

	dprintk("RPC: %5u %s: send size = %zd, recv size = %zd, req = %p\n",
		task->tk_pid, __func__, rqst->rq_callsize,
		rqst->rq_rcvsize, req);

	req->rl_connect_cookie = 0;	/* our reserved value */
	rpcrdma_set_xprtdata(rqst, req);
	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
	return 0;

out_fail:
	rpcrdma_buffer_put(req);
	return -ENOMEM;
}

/**
 * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
 * @task: RPC task
 *
 * Caller guarantees rqst->rq_buffer is non-NULL.
 */
static void
xprt_rdma_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	if (req->rl_backchannel)
		return;

	dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);

	ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
	rpcrdma_unmap_sges(ia, req);
	rpcrdma_buffer_put(req);
}

/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @task: RPC task with an RPC message in rq_snd_buf
 *
 * Return values:
 *        0:	The request has been sent
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *  ENOBUFS:	Call again later to send the request
 *      EIO:	A permanent error occurred. The request was not sent,
 *		and don't try it again
 *
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *
 *  1. Marshal the RPC request into an RPC RDMA request, which means
 *     putting a header in front of data, and creating IOVs for RDMA
 *     from those in the request.
 *  2. In marshaling, detect opportunities for RDMA, and use them.
 *  3. Post a recv message to set up asynch completion, then send
 *     the request (rpcrdma_ep_post).
 *  4. No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */
static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

	/* On retransmit, remove any previously registered chunks */
	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);

	rc = rpcrdma_marshal_req(rqst);
	if (rc < 0)
		goto failed_marshal;

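	/* Make sure a Receive buffer is available to catch the Reply */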
	if (req->rl_reply == NULL)	/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

failed_marshal:
	dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n",
		__func__, rc);
	if (rc == -EIO)
		r_xprt->rx_stats.failed_marshal_count++;
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}

void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_puts(seq, "\txprt:\trdma ");
	seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
		   0,	/* need a local port? */
		   xprt->stat.bind_count,
		   xprt->stat.connect_count,
		   xprt->stat.connect_time,
		   idle_time,
		   xprt->stat.sends,
		   xprt->stat.recvs,
		   xprt->stat.bad_xids,
		   xprt->stat.req_u,
		   xprt->stat.bklog_u);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
		   r_xprt->rx_stats.read_chunk_count,
		   r_xprt->rx_stats.write_chunk_count,
		   r_xprt->rx_stats.reply_chunk_count,
		   r_xprt->rx_stats.total_rdma_request,
		   r_xprt->rx_stats.total_rdma_reply,
		   r_xprt->rx_stats.pullup_copy_count,
		   r_xprt->rx_stats.fixup_copy_count,
		   r_xprt->rx_stats.hardway_register_count,
		   r_xprt->rx_stats.failed_marshal_count,
		   r_xprt->rx_stats.bad_reply_count,
		   r_xprt->rx_stats.nomsg_call_count);
	seq_printf(seq, "%lu %lu %lu %lu\n",
		   r_xprt->rx_stats.mrs_recovered,
		   r_xprt->rx_stats.mrs_orphaned,
		   r_xprt->rx_stats.mrs_allocated,
		   r_xprt->rx_stats.local_inv_needed);
}

static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong, /* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def, /* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	.bc_setup		= xprt_rdma_bc_setup,
	.bc_up			= xprt_rdma_bc_up,
	.bc_maxpayload		= xprt_rdma_bc_maxpayload,
	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
	.bc_destroy		= xprt_rdma_bc_destroy,
#endif
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
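/* The RPC client selects this transport by ident XPRT_TRANSPORT_RDMA;
 * for NFS this typically corresponds to the "proto=rdma" mount option.
 */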

void xprt_rdma_cleanup(void)
{
	int rc;

	dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);

	rpcrdma_destroy_wq();

	rc = xprt_unregister_transport(&xprt_rdma_bc);
	if (rc)
		dprintk("RPC: %s: xprt_unregister(bc) returned %i\n",
			__func__, rc);
}

int xprt_rdma_init(void)
{
	int rc;

	rc = rpcrdma_alloc_wq();
	if (rc)
		return rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc) {
		rpcrdma_destroy_wq();
		return rc;
	}

	rc = xprt_register_transport(&xprt_rdma_bc);
	if (rc) {
		xprt_unregister_transport(&xprt_rdma);
		rpcrdma_destroy_wq();
		return rc;
	}

	dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk("Defaults:\n");
	dprintk("\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk("\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}
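
/* xprt_rdma_init() and xprt_rdma_cleanup() are invoked from the rpcrdma
 * module's init and exit hooks (see module.c in this directory).
 */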