1 /* 2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the BSD-type 8 * license below: 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 17 * Redistributions in binary form must reproduce the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer in the documentation and/or other materials provided 20 * with the distribution. 21 * 22 * Neither the name of the Network Appliance, Inc. nor the names of 23 * its contributors may be used to endorse or promote products 24 * derived from this software without specific prior written 25 * permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * transport.c 42 * 43 * This file contains the top-level implementation of an RPC RDMA 44 * transport. 45 * 46 * Naming convention: functions beginning with xprt_ are part of the 47 * transport switch. All others are RPC RDMA internal. 48 */ 49 50 #include <linux/module.h> 51 #include <linux/slab.h> 52 #include <linux/seq_file.h> 53 #include <linux/sunrpc/addr.h> 54 55 #include "xprt_rdma.h" 56 57 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 58 # define RPCDBG_FACILITY RPCDBG_TRANS 59 #endif 60 61 /* 62 * tunables 63 */ 64 65 static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE; 66 unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE; 67 static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE; 68 static unsigned int xprt_rdma_inline_write_padding; 69 static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR; 70 int xprt_rdma_pad_optimize = 1; 71 72 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 73 74 static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE; 75 static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE; 76 static unsigned int min_inline_size = RPCRDMA_MIN_INLINE; 77 static unsigned int max_inline_size = RPCRDMA_MAX_INLINE; 78 static unsigned int zero; 79 static unsigned int max_padding = PAGE_SIZE; 80 static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS; 81 static unsigned int max_memreg = RPCRDMA_LAST - 1; 82 83 static struct ctl_table_header *sunrpc_table_header; 84 85 static struct ctl_table xr_tunables_table[] = { 86 { 87 .procname = "rdma_slot_table_entries", 88 .data = &xprt_rdma_slot_table_entries, 89 .maxlen = sizeof(unsigned int), 90 .mode = 0644, 91 .proc_handler = proc_dointvec_minmax, 92 .extra1 = &min_slot_table_size, 93 .extra2 = &max_slot_table_size 94 }, 95 { 96 .procname = "rdma_max_inline_read", 97 .data = &xprt_rdma_max_inline_read, 98 .maxlen = sizeof(unsigned int), 99 .mode = 0644, 100 .proc_handler = proc_dointvec, 101 .extra1 = &min_inline_size, 102 .extra2 = &max_inline_size, 103 }, 104 { 105 .procname = "rdma_max_inline_write", 106 .data = &xprt_rdma_max_inline_write, 107 .maxlen = sizeof(unsigned int), 108 .mode = 0644, 109 .proc_handler = proc_dointvec, 110 .extra1 = &min_inline_size, 111 .extra2 = &max_inline_size, 112 }, 113 { 114 .procname = "rdma_inline_write_padding", 115 .data = &xprt_rdma_inline_write_padding, 116 .maxlen = sizeof(unsigned int), 117 .mode = 0644, 118 .proc_handler = proc_dointvec_minmax, 119 .extra1 = &zero, 120 .extra2 = &max_padding, 121 }, 122 { 123 .procname = "rdma_memreg_strategy", 124 .data = &xprt_rdma_memreg_strategy, 125 .maxlen = sizeof(unsigned int), 126 .mode = 0644, 127 .proc_handler = proc_dointvec_minmax, 128 .extra1 = &min_memreg, 129 .extra2 = &max_memreg, 130 }, 131 { 132 .procname = "rdma_pad_optimize", 133 .data = &xprt_rdma_pad_optimize, 134 .maxlen = sizeof(unsigned int), 135 .mode = 0644, 136 .proc_handler = proc_dointvec, 137 }, 138 { }, 139 }; 140 141 static struct ctl_table sunrpc_table[] = { 142 { 143 .procname = "sunrpc", 144 .mode = 0555, 145 .child = xr_tunables_table 146 }, 147 { }, 148 }; 149 150 #endif 151 152 static struct rpc_xprt_ops xprt_rdma_procs; /*forward reference */ 153 154 static void 155 xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap) 156 { 157 struct sockaddr_in *sin = (struct sockaddr_in *)sap; 158 char buf[20]; 159 160 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 161 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 162 163 xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA; 164 } 165 166 static void 167 xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap) 168 { 169 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; 170 char buf[40]; 171 172 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 173 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 174 175 xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6; 176 } 177 178 void 179 xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap) 180 { 181 char buf[128]; 182 183 switch (sap->sa_family) { 184 case AF_INET: 185 xprt_rdma_format_addresses4(xprt, sap); 186 break; 187 case AF_INET6: 188 xprt_rdma_format_addresses6(xprt, sap); 189 break; 190 default: 191 pr_err("rpcrdma: Unrecognized address family\n"); 192 return; 193 } 194 195 (void)rpc_ntop(sap, buf, sizeof(buf)); 196 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); 197 198 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 199 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 200 201 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 202 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 203 204 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; 205 } 206 207 void 208 xprt_rdma_free_addresses(struct rpc_xprt *xprt) 209 { 210 unsigned int i; 211 212 for (i = 0; i < RPC_DISPLAY_MAX; i++) 213 switch (i) { 214 case RPC_DISPLAY_PROTO: 215 case RPC_DISPLAY_NETID: 216 continue; 217 default: 218 kfree(xprt->address_strings[i]); 219 } 220 } 221 222 static void 223 xprt_rdma_connect_worker(struct work_struct *work) 224 { 225 struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt, 226 rx_connect_worker.work); 227 struct rpc_xprt *xprt = &r_xprt->rx_xprt; 228 int rc = 0; 229 230 xprt_clear_connected(xprt); 231 232 dprintk("RPC: %s: %sconnect\n", __func__, 233 r_xprt->rx_ep.rep_connected != 0 ? "re" : ""); 234 rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia); 235 if (rc) 236 xprt_wake_pending_tasks(xprt, rc); 237 238 dprintk("RPC: %s: exit\n", __func__); 239 xprt_clear_connecting(xprt); 240 } 241 242 static void 243 xprt_rdma_inject_disconnect(struct rpc_xprt *xprt) 244 { 245 struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt, 246 rx_xprt); 247 248 pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt); 249 rdma_disconnect(r_xprt->rx_ia.ri_id); 250 } 251 252 /* 253 * xprt_rdma_destroy 254 * 255 * Destroy the xprt. 256 * Free all memory associated with the object, including its own. 257 * NOTE: none of the *destroy methods free memory for their top-level 258 * objects, even though they may have allocated it (they do free 259 * private memory). It's up to the caller to handle it. In this 260 * case (RDMA transport), all structure memory is inlined with the 261 * struct rpcrdma_xprt. 262 */ 263 static void 264 xprt_rdma_destroy(struct rpc_xprt *xprt) 265 { 266 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 267 268 dprintk("RPC: %s: called\n", __func__); 269 270 cancel_delayed_work_sync(&r_xprt->rx_connect_worker); 271 272 xprt_clear_connected(xprt); 273 274 rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); 275 rpcrdma_buffer_destroy(&r_xprt->rx_buf); 276 rpcrdma_ia_close(&r_xprt->rx_ia); 277 278 xprt_rdma_free_addresses(xprt); 279 280 xprt_free(xprt); 281 282 dprintk("RPC: %s: returning\n", __func__); 283 284 module_put(THIS_MODULE); 285 } 286 287 static const struct rpc_timeout xprt_rdma_default_timeout = { 288 .to_initval = 60 * HZ, 289 .to_maxval = 60 * HZ, 290 }; 291 292 /** 293 * xprt_setup_rdma - Set up transport to use RDMA 294 * 295 * @args: rpc transport arguments 296 */ 297 static struct rpc_xprt * 298 xprt_setup_rdma(struct xprt_create *args) 299 { 300 struct rpcrdma_create_data_internal cdata; 301 struct rpc_xprt *xprt; 302 struct rpcrdma_xprt *new_xprt; 303 struct rpcrdma_ep *new_ep; 304 struct sockaddr *sap; 305 int rc; 306 307 if (args->addrlen > sizeof(xprt->addr)) { 308 dprintk("RPC: %s: address too large\n", __func__); 309 return ERR_PTR(-EBADF); 310 } 311 312 xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 313 xprt_rdma_slot_table_entries, 314 xprt_rdma_slot_table_entries); 315 if (xprt == NULL) { 316 dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n", 317 __func__); 318 return ERR_PTR(-ENOMEM); 319 } 320 321 /* 60 second timeout, no retries */ 322 xprt->timeout = &xprt_rdma_default_timeout; 323 xprt->bind_timeout = RPCRDMA_BIND_TO; 324 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; 325 xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO; 326 327 xprt->resvport = 0; /* privileged port not needed */ 328 xprt->tsh_size = 0; /* RPC-RDMA handles framing */ 329 xprt->ops = &xprt_rdma_procs; 330 331 /* 332 * Set up RDMA-specific connect data. 333 */ 334 335 sap = (struct sockaddr *)&cdata.addr; 336 memcpy(sap, args->dstaddr, args->addrlen); 337 338 /* Ensure xprt->addr holds valid server TCP (not RDMA) 339 * address, for any side protocols which peek at it */ 340 xprt->prot = IPPROTO_TCP; 341 xprt->addrlen = args->addrlen; 342 memcpy(&xprt->addr, sap, xprt->addrlen); 343 344 if (rpc_get_port(sap)) 345 xprt_set_bound(xprt); 346 347 cdata.max_requests = xprt->max_reqs; 348 349 cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */ 350 cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */ 351 352 cdata.inline_wsize = xprt_rdma_max_inline_write; 353 if (cdata.inline_wsize > cdata.wsize) 354 cdata.inline_wsize = cdata.wsize; 355 356 cdata.inline_rsize = xprt_rdma_max_inline_read; 357 if (cdata.inline_rsize > cdata.rsize) 358 cdata.inline_rsize = cdata.rsize; 359 360 cdata.padding = xprt_rdma_inline_write_padding; 361 362 /* 363 * Create new transport instance, which includes initialized 364 * o ia 365 * o endpoint 366 * o buffers 367 */ 368 369 new_xprt = rpcx_to_rdmax(xprt); 370 371 rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy); 372 if (rc) 373 goto out1; 374 375 /* 376 * initialize and create ep 377 */ 378 new_xprt->rx_data = cdata; 379 new_ep = &new_xprt->rx_ep; 380 new_ep->rep_remote_addr = cdata.addr; 381 382 rc = rpcrdma_ep_create(&new_xprt->rx_ep, 383 &new_xprt->rx_ia, &new_xprt->rx_data); 384 if (rc) 385 goto out2; 386 387 /* 388 * Allocate pre-registered send and receive buffers for headers and 389 * any inline data. Also specify any padding which will be provided 390 * from a preregistered zero buffer. 391 */ 392 rc = rpcrdma_buffer_create(new_xprt); 393 if (rc) 394 goto out3; 395 396 /* 397 * Register a callback for connection events. This is necessary because 398 * connection loss notification is async. We also catch connection loss 399 * when reaping receives. 400 */ 401 INIT_DELAYED_WORK(&new_xprt->rx_connect_worker, 402 xprt_rdma_connect_worker); 403 404 xprt_rdma_format_addresses(xprt, sap); 405 xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt); 406 if (xprt->max_payload == 0) 407 goto out4; 408 xprt->max_payload <<= PAGE_SHIFT; 409 dprintk("RPC: %s: transport data payload maximum: %zu bytes\n", 410 __func__, xprt->max_payload); 411 412 if (!try_module_get(THIS_MODULE)) 413 goto out4; 414 415 dprintk("RPC: %s: %s:%s\n", __func__, 416 xprt->address_strings[RPC_DISPLAY_ADDR], 417 xprt->address_strings[RPC_DISPLAY_PORT]); 418 return xprt; 419 420 out4: 421 xprt_rdma_free_addresses(xprt); 422 rc = -EINVAL; 423 out3: 424 rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia); 425 out2: 426 rpcrdma_ia_close(&new_xprt->rx_ia); 427 out1: 428 xprt_free(xprt); 429 return ERR_PTR(rc); 430 } 431 432 /* 433 * Close a connection, during shutdown or timeout/reconnect 434 */ 435 static void 436 xprt_rdma_close(struct rpc_xprt *xprt) 437 { 438 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 439 440 dprintk("RPC: %s: closing\n", __func__); 441 if (r_xprt->rx_ep.rep_connected > 0) 442 xprt->reestablish_timeout = 0; 443 xprt_disconnect_done(xprt); 444 rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia); 445 } 446 447 static void 448 xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port) 449 { 450 struct sockaddr_in *sap; 451 452 sap = (struct sockaddr_in *)&xprt->addr; 453 sap->sin_port = htons(port); 454 sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr; 455 sap->sin_port = htons(port); 456 dprintk("RPC: %s: %u\n", __func__, port); 457 } 458 459 static void 460 xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) 461 { 462 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 463 464 if (r_xprt->rx_ep.rep_connected != 0) { 465 /* Reconnect */ 466 schedule_delayed_work(&r_xprt->rx_connect_worker, 467 xprt->reestablish_timeout); 468 xprt->reestablish_timeout <<= 1; 469 if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO) 470 xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO; 471 else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) 472 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; 473 } else { 474 schedule_delayed_work(&r_xprt->rx_connect_worker, 0); 475 if (!RPC_IS_ASYNC(task)) 476 flush_delayed_work(&r_xprt->rx_connect_worker); 477 } 478 } 479 480 /* 481 * The RDMA allocate/free functions need the task structure as a place 482 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv 483 * sequence. 484 * 485 * The RPC layer allocates both send and receive buffers in the same call 486 * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer). 487 * We may register rq_rcv_buf when using reply chunks. 488 */ 489 static void * 490 xprt_rdma_allocate(struct rpc_task *task, size_t size) 491 { 492 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; 493 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 494 struct rpcrdma_regbuf *rb; 495 struct rpcrdma_req *req; 496 size_t min_size; 497 gfp_t flags; 498 499 req = rpcrdma_buffer_get(&r_xprt->rx_buf); 500 if (req == NULL) 501 return NULL; 502 503 flags = RPCRDMA_DEF_GFP; 504 if (RPC_IS_SWAPPER(task)) 505 flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; 506 507 if (req->rl_rdmabuf == NULL) 508 goto out_rdmabuf; 509 if (req->rl_sendbuf == NULL) 510 goto out_sendbuf; 511 if (size > req->rl_sendbuf->rg_size) 512 goto out_sendbuf; 513 514 out: 515 dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req); 516 req->rl_connect_cookie = 0; /* our reserved value */ 517 req->rl_task = task; 518 return req->rl_sendbuf->rg_base; 519 520 out_rdmabuf: 521 min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp); 522 rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags); 523 if (IS_ERR(rb)) 524 goto out_fail; 525 req->rl_rdmabuf = rb; 526 527 out_sendbuf: 528 /* XDR encoding and RPC/RDMA marshaling of this request has not 529 * yet occurred. Thus a lower bound is needed to prevent buffer 530 * overrun during marshaling. 531 * 532 * RPC/RDMA marshaling may choose to send payload bearing ops 533 * inline, if the result is smaller than the inline threshold. 534 * The value of the "size" argument accounts for header 535 * requirements but not for the payload in these cases. 536 * 537 * Likewise, allocate enough space to receive a reply up to the 538 * size of the inline threshold. 539 * 540 * It's unlikely that both the send header and the received 541 * reply will be large, but slush is provided here to allow 542 * flexibility when marshaling. 543 */ 544 min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp); 545 min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp); 546 if (size < min_size) 547 size = min_size; 548 549 rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags); 550 if (IS_ERR(rb)) 551 goto out_fail; 552 rb->rg_owner = req; 553 554 r_xprt->rx_stats.hardway_register_count += size; 555 rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf); 556 req->rl_sendbuf = rb; 557 goto out; 558 559 out_fail: 560 rpcrdma_buffer_put(req); 561 r_xprt->rx_stats.failed_marshal_count++; 562 return NULL; 563 } 564 565 /* 566 * This function returns all RDMA resources to the pool. 567 */ 568 static void 569 xprt_rdma_free(void *buffer) 570 { 571 struct rpcrdma_req *req; 572 struct rpcrdma_xprt *r_xprt; 573 struct rpcrdma_regbuf *rb; 574 575 if (buffer == NULL) 576 return; 577 578 rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]); 579 req = rb->rg_owner; 580 if (req->rl_backchannel) 581 return; 582 583 r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); 584 585 dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply); 586 587 r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, 588 !RPC_IS_ASYNC(req->rl_task)); 589 590 rpcrdma_buffer_put(req); 591 } 592 593 /* 594 * send_request invokes the meat of RPC RDMA. It must do the following: 595 * 1. Marshal the RPC request into an RPC RDMA request, which means 596 * putting a header in front of data, and creating IOVs for RDMA 597 * from those in the request. 598 * 2. In marshaling, detect opportunities for RDMA, and use them. 599 * 3. Post a recv message to set up asynch completion, then send 600 * the request (rpcrdma_ep_post). 601 * 4. No partial sends are possible in the RPC-RDMA protocol (as in UDP). 602 */ 603 604 static int 605 xprt_rdma_send_request(struct rpc_task *task) 606 { 607 struct rpc_rqst *rqst = task->tk_rqstp; 608 struct rpc_xprt *xprt = rqst->rq_xprt; 609 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 610 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 611 int rc = 0; 612 613 rc = rpcrdma_marshal_req(rqst); 614 if (rc < 0) 615 goto failed_marshal; 616 617 if (req->rl_reply == NULL) /* e.g. reconnection */ 618 rpcrdma_recv_buffer_get(req); 619 620 /* Must suppress retransmit to maintain credits */ 621 if (req->rl_connect_cookie == xprt->connect_cookie) 622 goto drop_connection; 623 req->rl_connect_cookie = xprt->connect_cookie; 624 625 if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) 626 goto drop_connection; 627 628 rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; 629 rqst->rq_bytes_sent = 0; 630 return 0; 631 632 failed_marshal: 633 r_xprt->rx_stats.failed_marshal_count++; 634 dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n", 635 __func__, rc); 636 if (rc == -EIO) 637 return -EIO; 638 drop_connection: 639 xprt_disconnect_done(xprt); 640 return -ENOTCONN; /* implies disconnect */ 641 } 642 643 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 644 { 645 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 646 long idle_time = 0; 647 648 if (xprt_connected(xprt)) 649 idle_time = (long)(jiffies - xprt->last_used) / HZ; 650 651 seq_puts(seq, "\txprt:\trdma "); 652 seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ", 653 0, /* need a local port? */ 654 xprt->stat.bind_count, 655 xprt->stat.connect_count, 656 xprt->stat.connect_time, 657 idle_time, 658 xprt->stat.sends, 659 xprt->stat.recvs, 660 xprt->stat.bad_xids, 661 xprt->stat.req_u, 662 xprt->stat.bklog_u); 663 seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n", 664 r_xprt->rx_stats.read_chunk_count, 665 r_xprt->rx_stats.write_chunk_count, 666 r_xprt->rx_stats.reply_chunk_count, 667 r_xprt->rx_stats.total_rdma_request, 668 r_xprt->rx_stats.total_rdma_reply, 669 r_xprt->rx_stats.pullup_copy_count, 670 r_xprt->rx_stats.fixup_copy_count, 671 r_xprt->rx_stats.hardway_register_count, 672 r_xprt->rx_stats.failed_marshal_count, 673 r_xprt->rx_stats.bad_reply_count, 674 r_xprt->rx_stats.nomsg_call_count); 675 } 676 677 static int 678 xprt_rdma_enable_swap(struct rpc_xprt *xprt) 679 { 680 return 0; 681 } 682 683 static void 684 xprt_rdma_disable_swap(struct rpc_xprt *xprt) 685 { 686 } 687 688 /* 689 * Plumbing for rpc transport switch and kernel module 690 */ 691 692 static struct rpc_xprt_ops xprt_rdma_procs = { 693 .reserve_xprt = xprt_reserve_xprt_cong, 694 .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ 695 .alloc_slot = xprt_alloc_slot, 696 .release_request = xprt_release_rqst_cong, /* ditto */ 697 .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ 698 .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ 699 .set_port = xprt_rdma_set_port, 700 .connect = xprt_rdma_connect, 701 .buf_alloc = xprt_rdma_allocate, 702 .buf_free = xprt_rdma_free, 703 .send_request = xprt_rdma_send_request, 704 .close = xprt_rdma_close, 705 .destroy = xprt_rdma_destroy, 706 .print_stats = xprt_rdma_print_stats, 707 .enable_swap = xprt_rdma_enable_swap, 708 .disable_swap = xprt_rdma_disable_swap, 709 .inject_disconnect = xprt_rdma_inject_disconnect, 710 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 711 .bc_setup = xprt_rdma_bc_setup, 712 .bc_up = xprt_rdma_bc_up, 713 .bc_maxpayload = xprt_rdma_bc_maxpayload, 714 .bc_free_rqst = xprt_rdma_bc_free_rqst, 715 .bc_destroy = xprt_rdma_bc_destroy, 716 #endif 717 }; 718 719 static struct xprt_class xprt_rdma = { 720 .list = LIST_HEAD_INIT(xprt_rdma.list), 721 .name = "rdma", 722 .owner = THIS_MODULE, 723 .ident = XPRT_TRANSPORT_RDMA, 724 .setup = xprt_setup_rdma, 725 }; 726 727 void xprt_rdma_cleanup(void) 728 { 729 int rc; 730 731 dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n"); 732 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 733 if (sunrpc_table_header) { 734 unregister_sysctl_table(sunrpc_table_header); 735 sunrpc_table_header = NULL; 736 } 737 #endif 738 rc = xprt_unregister_transport(&xprt_rdma); 739 if (rc) 740 dprintk("RPC: %s: xprt_unregister returned %i\n", 741 __func__, rc); 742 743 rpcrdma_destroy_wq(); 744 frwr_destroy_recovery_wq(); 745 746 rc = xprt_unregister_transport(&xprt_rdma_bc); 747 if (rc) 748 dprintk("RPC: %s: xprt_unregister(bc) returned %i\n", 749 __func__, rc); 750 } 751 752 int xprt_rdma_init(void) 753 { 754 int rc; 755 756 rc = frwr_alloc_recovery_wq(); 757 if (rc) 758 return rc; 759 760 rc = rpcrdma_alloc_wq(); 761 if (rc) { 762 frwr_destroy_recovery_wq(); 763 return rc; 764 } 765 766 rc = xprt_register_transport(&xprt_rdma); 767 if (rc) { 768 rpcrdma_destroy_wq(); 769 frwr_destroy_recovery_wq(); 770 return rc; 771 } 772 773 rc = xprt_register_transport(&xprt_rdma_bc); 774 if (rc) { 775 xprt_unregister_transport(&xprt_rdma); 776 rpcrdma_destroy_wq(); 777 frwr_destroy_recovery_wq(); 778 return rc; 779 } 780 781 dprintk("RPCRDMA Module Init, register RPC RDMA transport\n"); 782 783 dprintk("Defaults:\n"); 784 dprintk("\tSlots %d\n" 785 "\tMaxInlineRead %d\n\tMaxInlineWrite %d\n", 786 xprt_rdma_slot_table_entries, 787 xprt_rdma_max_inline_read, xprt_rdma_max_inline_write); 788 dprintk("\tPadding %d\n\tMemreg %d\n", 789 xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy); 790 791 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 792 if (!sunrpc_table_header) 793 sunrpc_table_header = register_sysctl_table(sunrpc_table); 794 #endif 795 return 0; 796 } 797