// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
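 *
 * The sysctl tunables below are exposed under /proc/sys/sunrpc when
 * CONFIG_SUNRPC_DEBUG is enabled.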
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/smp.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
static unsigned int dummy;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_inline_size,
		.extra2		= &max_inline_size,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &dummy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

static const struct rpc_xprt_ops xprt_rdma_procs;

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

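	/* "%pi6" emits the raw IPv6 address as a contiguous hex string
	 * (no colons), which is the form the HEX_ADDR display expects.
	 */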
	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

/**
 * xprt_rdma_connect_worker - establish connection in the background
 * @work: worker thread context
 *
 * Requester holds the xprt's send lock to prevent activity on this
 * transport while a fresh connection is being established. RPC tasks
 * sleep on the xprt's pending queue waiting for connect to complete.
 */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

	rc = rpcrdma_xprt_connect(r_xprt);
	xprt_clear_connecting(xprt);
	if (!rc) {
		xprt->connect_cookie++;
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies -
					   xprt->stat.connect_start;
		xprt_set_connected(xprt);
		rc = -EAGAIN;
	}
	xprt_wake_pending_tasks(xprt, rc);
}

/**
 * xprt_rdma_inject_disconnect - inject a connection fault
 * @xprt: transport context
 *
 * If @xprt is connected, disconnect it to simulate spurious connection
 * loss.
 */
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_inject_dsc(r_xprt);
	rdma_disconnect(r_xprt->rx_ep->re_id);
}

/**
 * xprt_rdma_destroy - Full tear down of transport
 * @xprt: doomed transport context
 *
 * Caller guarantees there will be no more calls to us with
 * this @xprt.
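 *
 * The connect worker is cancelled first so that a connection attempt
 * cannot race with the disconnect and buffer tear-down below.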
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	rpcrdma_xprt_disconnect(r_xprt);
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);

	xprt_rdma_free_addresses(xprt);
	xprt_free(xprt);

	module_put(THIS_MODULE);
}

/* 60 second timeout, no retries */
static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct sockaddr *sap;
	int rc;

	if (args->addrlen > sizeof(xprt->addr))
		return ERR_PTR(-EBADF);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-EIO);

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
			  xprt_rdma_slot_table_entries);
	if (!xprt) {
		module_put(THIS_MODULE);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->connect_timeout = xprt->timeout->to_initval;
	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */
	sap = args->dstaddr;

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, sap, xprt->addrlen);

	if (rpc_get_port(sap))
		xprt_set_bound(xprt);
	xprt_rdma_format_addresses(xprt, sap);

	new_xprt = rpcx_to_rdmax(xprt);
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc) {
		xprt_rdma_free_addresses(xprt);
		xprt_free(xprt);
		module_put(THIS_MODULE);
		return ERR_PTR(rc);
	}

	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;

	return xprt;
}

/**
 * xprt_rdma_close - close a transport connection
 * @xprt: transport context
 *
 * Called during autoclose or device removal.
 *
 * Caller holds @xprt's send lock to prevent activity on this
 * transport while the connection is torn down.
 */
void xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	rpcrdma_xprt_disconnect(r_xprt);

	xprt->reestablish_timeout = 0;
	++xprt->connect_cookie;
	xprt_disconnect_done(xprt);
}

/**
 * xprt_rdma_set_port - update server port with rpcbind result
 * @xprt: controlling RPC transport
 * @port: new port value
 *
 * Transport connect status is unchanged.
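 *
 * Both the decimal and hex port strings are regenerated so that later
 * displays of this transport reflect the port returned by rpcbind.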
 */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
	char buf[8];

	rpc_set_port(sap, port);

	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
	snprintf(buf, sizeof(buf), "%u", port);
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	snprintf(buf, sizeof(buf), "%4hx", port);
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	trace_xprtrdma_op_setport(container_of(xprt, struct rpcrdma_xprt,
					       rx_xprt));
}

/**
 * xprt_rdma_timer - invoked when an RPC times out
 * @xprt: controlling RPC transport
 * @task: RPC task that timed out
 *
 * Invoked when the transport is still connected, but an RPC
 * retransmit timeout occurs.
 *
 * Since RDMA connections don't have a keep-alive, forcibly
 * disconnect and retry to connect. This drives full
 * detection of the network path, and retransmissions of
 * all pending RPCs.
 */
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_force_disconnect(xprt);
}

/**
 * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection
 * @xprt: controlling transport instance
 * @connect_timeout: reconnect timeout after client disconnects
 * @reconnect_timeout: reconnect timeout after server disconnects
 *
 */
static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
					  unsigned long connect_timeout,
					  unsigned long reconnect_timeout)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_set_cto(r_xprt, connect_timeout, reconnect_timeout);

	spin_lock(&xprt->transport_lock);

	if (connect_timeout < xprt->connect_timeout) {
		struct rpc_timeout to;
		unsigned long initval;

		to = *xprt->timeout;
		initval = connect_timeout;
		if (initval < RPCRDMA_INIT_REEST_TO << 1)
			initval = RPCRDMA_INIT_REEST_TO << 1;
		to.to_initval = initval;
		to.to_maxval = initval;
		r_xprt->rx_timeout = to;
		xprt->timeout = &r_xprt->rx_timeout;
		xprt->connect_timeout = connect_timeout;
	}

	if (reconnect_timeout < xprt->max_reconnect_timeout)
		xprt->max_reconnect_timeout = reconnect_timeout;

	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_rdma_connect - schedule an attempt to reconnect
 * @xprt: transport state
 * @task: RPC scheduler context (unused)
 *
 */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned long delay;

	delay = 0;
	if (ep && ep->re_connect_status != 0) {
		delay = xprt_reconnect_delay(xprt);
		xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
	}
	trace_xprtrdma_op_connect(r_xprt, delay);
	queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
			   delay);
}

/**
 * xprt_rdma_alloc_slot - allocate an rpc_rqst
 * @xprt: controlling RPC transport
 * @task: RPC task requesting a fresh rpc_rqst
 *
 * tk_status values:
 *	%0 if task->tk_rqstp points to a fresh rpc_rqst
 *	%-EAGAIN if no rpc_rqst is available; queued on backlog
 */
static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (!req)
		goto out_sleep;
	task->tk_rqstp = &req->rl_slot;
	task->tk_status = 0;
	return;

out_sleep:
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
	task->tk_status = -EAGAIN;
}

/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(xprt, struct rpcrdma_xprt, rx_xprt);

	memset(rqst, 0, sizeof(*rqst));
	rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
	if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
				 struct rpcrdma_regbuf *rb, size_t size,
				 gfp_t flags)
{
	if (unlikely(rdmab_length(rb) < size)) {
		if (!rpcrdma_regbuf_realloc(rb, size, flags))
			return false;
		r_xprt->rx_stats.hardway_register_count += size;
	}
	return true;
}

/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *	0:	Success; rq_buffer points to RPC buffer to use
 *	ENOMEM:	Out of memory, call again later
 *	EIO:	A permanent error occurred, do not retry
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	gfp_t flags;

	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
				  flags))
		goto out_fail;
	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
				  flags))
		goto out_fail;

	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
	rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
	trace_xprtrdma_op_allocate(task, req);
	return 0;

out_fail:
	trace_xprtrdma_op_allocate(task, NULL);
	return -ENOMEM;
}

/**
 * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
 * @task: RPC task
 *
 * Caller guarantees rqst->rq_buffer is non-NULL.
 */
static void
xprt_rdma_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	trace_xprtrdma_op_free(task, req);

	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, req);

	/* XXX: If the RPC is completing because of a signal and
	 * not because a reply was received, we ought to ensure
	 * that the Send completion has fired, so that memory
	 * involved with the Send is not still visible to the NIC.
	 */
}

/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @rqst: RPC message in rq_snd_buf
 *
 * Caller holds the transport's write lock.
622 * 623 * Returns: 624 * %0 if the RPC message has been sent 625 * %-ENOTCONN if the caller should reconnect and call again 626 * %-EAGAIN if the caller should call again 627 * %-ENOBUFS if the caller should call again after a delay 628 * %-EMSGSIZE if encoding ran out of buffer space. The request 629 * was not sent. Do not try to send this message again. 630 * %-EIO if an I/O error occurred. The request was not sent. 631 * Do not try to send this message again. 632 */ 633 static int 634 xprt_rdma_send_request(struct rpc_rqst *rqst) 635 { 636 struct rpc_xprt *xprt = rqst->rq_xprt; 637 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); 638 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 639 int rc = 0; 640 641 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 642 if (unlikely(!rqst->rq_buffer)) 643 return xprt_rdma_bc_send_reply(rqst); 644 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 645 646 if (!xprt_connected(xprt)) 647 return -ENOTCONN; 648 649 if (!xprt_request_get_cong(xprt, rqst)) 650 return -EBADSLT; 651 652 rc = rpcrdma_marshal_req(r_xprt, rqst); 653 if (rc < 0) 654 goto failed_marshal; 655 656 /* Must suppress retransmit to maintain credits */ 657 if (rqst->rq_connect_cookie == xprt->connect_cookie) 658 goto drop_connection; 659 rqst->rq_xtime = ktime_get(); 660 661 if (rpcrdma_post_sends(r_xprt, req)) 662 goto drop_connection; 663 664 rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; 665 666 /* An RPC with no reply will throw off credit accounting, 667 * so drop the connection to reset the credit grant. 668 */ 669 if (!rpc_reply_expected(rqst->rq_task)) 670 goto drop_connection; 671 return 0; 672 673 failed_marshal: 674 if (rc != -ENOTCONN) 675 return rc; 676 drop_connection: 677 xprt_rdma_close(xprt); 678 return -ENOTCONN; 679 } 680 681 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 682 { 683 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); 684 long idle_time = 0; 685 686 if (xprt_connected(xprt)) 687 idle_time = (long)(jiffies - xprt->last_used) / HZ; 688 689 seq_puts(seq, "\txprt:\trdma "); 690 seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ", 691 0, /* need a local port? 
		   xprt->stat.bind_count,
		   xprt->stat.connect_count,
		   xprt->stat.connect_time / HZ,
		   idle_time,
		   xprt->stat.sends,
		   xprt->stat.recvs,
		   xprt->stat.bad_xids,
		   xprt->stat.req_u,
		   xprt->stat.bklog_u);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
		   r_xprt->rx_stats.read_chunk_count,
		   r_xprt->rx_stats.write_chunk_count,
		   r_xprt->rx_stats.reply_chunk_count,
		   r_xprt->rx_stats.total_rdma_request,
		   r_xprt->rx_stats.total_rdma_reply,
		   r_xprt->rx_stats.pullup_copy_count,
		   r_xprt->rx_stats.fixup_copy_count,
		   r_xprt->rx_stats.hardway_register_count,
		   r_xprt->rx_stats.failed_marshal_count,
		   r_xprt->rx_stats.bad_reply_count,
		   r_xprt->rx_stats.nomsg_call_count);
	seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
		   r_xprt->rx_stats.mrs_recycled,
		   r_xprt->rx_stats.mrs_orphaned,
		   r_xprt->rx_stats.mrs_allocated,
		   r_xprt->rx_stats.local_inv_needed,
		   r_xprt->rx_stats.empty_sendctx_q,
		   r_xprt->rx_stats.reply_waits_for_send);
}

static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static const struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_rdma_alloc_slot,
	.free_slot		= xprt_rdma_free_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.wait_for_reply_request	= xprt_wait_for_reply_request_def, /* ditto */
	.timer			= xprt_rdma_timer,
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.set_connect_timeout	= xprt_rdma_set_connect_timeout,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	.bc_setup		= xprt_rdma_bc_setup,
	.bc_maxpayload		= xprt_rdma_bc_maxpayload,
	.bc_num_slots		= xprt_rdma_bc_max_slots,
	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
	.bc_destroy		= xprt_rdma_bc_destroy,
#endif
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};

void xprt_rdma_cleanup(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xprt_rdma);
	xprt_unregister_transport(&xprt_rdma_bc);
}

int xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc)
		return rc;

	rc = xprt_register_transport(&xprt_rdma_bc);
	if (rc) {
		xprt_unregister_transport(&xprt_rdma);
		return rc;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}