// SPDX-License-Identifier: GPL-2.0
/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "socklib.h"
#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
                struct socket *sock);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO        (15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */
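/*
 * Example: with the tunables table below registered, the resulting
 * procfs entries can be read and written at runtime, e.g.
 *
 *      echo 64 > /proc/sys/sunrpc/tcp_slot_table_entries
 *
 * subject to the extra1/extra2 bounds enforced by proc_dointvec_minmax().
 */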

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

static struct xprt_class xs_local_transport;
static struct xprt_class xs_udp_transport;
static struct xprt_class xs_tcp_transport;
static struct xprt_class xs_bc_tcp_transport;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static struct ctl_table xs_tunables_table[] = {
        {
                .procname       = "udp_slot_table_entries",
                .data           = &xprt_udp_slot_table_entries,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_slot_table_size,
                .extra2         = &max_slot_table_size
        },
        {
                .procname       = "tcp_slot_table_entries",
                .data           = &xprt_tcp_slot_table_entries,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_slot_table_size,
                .extra2         = &max_slot_table_size
        },
        {
                .procname       = "tcp_max_slot_table_entries",
                .data           = &xprt_max_tcp_slot_table_entries,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_slot_table_size,
                .extra2         = &max_tcp_slot_table_limit
        },
        {
                .procname       = "min_resvport",
                .data           = &xprt_min_resvport,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &xprt_min_resvport_limit,
                .extra2         = &xprt_max_resvport_limit
        },
        {
                .procname       = "max_resvport",
                .data           = &xprt_max_resvport,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &xprt_min_resvport_limit,
                .extra2         = &xprt_max_resvport_limit
        },
        {
                .procname       = "tcp_fin_timeout",
                .data           = &xs_tcp_fin_timeout,
                .maxlen         = sizeof(xs_tcp_fin_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { },
};

static struct ctl_table sunrpc_table[] = {
        {
                .procname       = "sunrpc",
                .mode           = 0555,
                .child          = xs_tunables_table
        },
        { },
};

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO              (60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO         (2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO    (3U * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO         (5U * 60 * HZ)
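/*
 * Illustrative backoff sequence for the reestablish timeout above:
 * starting from XS_TCP_INIT_REEST_TO, reconnect delays grow roughly as
 * 3s, 6s, 12s, ... as each failed attempt doubles
 * xprt->reestablish_timeout (the doubling happens in the connect path,
 * bounded by the transport's maximum reconnect timeout).
 */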

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        u8 *buf = (u8 *) packet;
        int j;

        dprintk("RPC: %s\n", msg);
        for (j = 0; j < count && j < 128; j += 4) {
                if (!(j & 31)) {
                        if (j)
                                dprintk("\n");
                        dprintk("0x%04x ", j);
                }
                dprintk("%02x%02x%02x%02x ",
                        buf[j], buf[j+1], buf[j+2], buf[j+3]);
        }
        dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        /* NOP */
}
#endif

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
        return (struct rpc_xprt *) sk->sk_user_data;
}

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
        return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
        return (struct sockaddr_un *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
        return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
        return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
        struct sockaddr *sap = xs_addr(xprt);
        struct sockaddr_in6 *sin6;
        struct sockaddr_in *sin;
        struct sockaddr_un *sun;
        char buf[128];

        switch (sap->sa_family) {
        case AF_LOCAL:
                sun = xs_addr_un(xprt);
                strlcpy(buf, sun->sun_path, sizeof(buf));
                xprt->address_strings[RPC_DISPLAY_ADDR] =
                                                kstrdup(buf, GFP_KERNEL);
                break;
        case AF_INET:
                (void)rpc_ntop(sap, buf, sizeof(buf));
                xprt->address_strings[RPC_DISPLAY_ADDR] =
                                                kstrdup(buf, GFP_KERNEL);
                sin = xs_addr_in(xprt);
                snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
                break;
        case AF_INET6:
                (void)rpc_ntop(sap, buf, sizeof(buf));
                xprt->address_strings[RPC_DISPLAY_ADDR] =
                                                kstrdup(buf, GFP_KERNEL);
                sin6 = xs_addr_in6(xprt);
                snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
                break;
        default:
                BUG();
        }

        xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
        struct sockaddr *sap = xs_addr(xprt);
        char buf[128];

        snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_peer_addresses(struct rpc_xprt *xprt,
                                     const char *protocol,
                                     const char *netid)
{
        xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
        xprt->address_strings[RPC_DISPLAY_NETID] = netid;
        xs_format_common_peer_addresses(xprt);
        xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
        kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
        kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

        xs_format_common_peer_ports(xprt);
}
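/*
 * Example of the display strings built above: for a peer at
 * 192.168.0.10, port 2049, RPC_DISPLAY_ADDR is "192.168.0.10",
 * RPC_DISPLAY_HEX_ADDR is "c0a8000a" (the "%08x" of the address in
 * host order), RPC_DISPLAY_PORT is "2049" and RPC_DISPLAY_HEX_PORT
 * is " 801" (the "%4hx" of the port).
 */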
static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
        unsigned int i;

        for (i = 0; i < RPC_DISPLAY_MAX; i++)
                switch (i) {
                case RPC_DISPLAY_PROTO:
                case RPC_DISPLAY_NETID:
                        continue;
                default:
                        kfree(xprt->address_strings[i]);
                }
}

static size_t
xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
{
        size_t i, n;

        if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
                return want;
        n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < n; i++) {
                if (buf->pages[i])
                        continue;
                buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
                if (!buf->pages[i]) {
                        i *= PAGE_SIZE;
                        return i > buf->page_base ? i - buf->page_base : 0;
                }
        }
        return want;
}

static ssize_t
xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
{
        ssize_t ret;

        if (seek != 0)
                iov_iter_advance(&msg->msg_iter, seek);
        ret = sock_recvmsg(sock, msg, flags);
        return ret > 0 ? ret + seek : ret;
}

static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
                struct kvec *kvec, size_t count, size_t seek)
{
        iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
        return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
                struct bio_vec *bvec, unsigned long nr, size_t count,
                size_t seek)
{
        iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
        return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
                size_t count)
{
        iov_iter_discard(&msg->msg_iter, READ, count);
        return sock_recvmsg(sock, msg, flags);
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
        struct bvec_iter bi = {
                .bi_size = count,
        };
        struct bio_vec bv;

        bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
        for_each_bvec(bv, bvec, bi, bi)
                flush_dcache_page(bv.bv_page);
}
#else
static inline void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
}
#endif
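/*
 * Receive-path note: an xdr_buf is consumed in three pieces -- the
 * head kvec, then the page vector, then the tail kvec -- and
 * xs_read_xdr_buf() below walks them in that order.  The "seek"
 * argument says how many bytes were already received on a previous
 * pass, so a partially-read buffer can be resumed mid-stream.
 */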
static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
{
        size_t want, seek_init = seek, offset = 0;
        ssize_t ret;

        want = min_t(size_t, count, buf->head[0].iov_len);
        if (seek < want) {
                ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
                if (ret <= 0)
                        goto sock_err;
                offset += ret;
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
                        goto out;
                seek = 0;
        } else {
                seek -= want;
                offset += want;
        }

        want = xs_alloc_sparse_pages(buf,
                        min_t(size_t, count - offset, buf->page_len),
                        GFP_KERNEL);
        if (seek < want) {
                ret = xs_read_bvec(sock, msg, flags, buf->bvec,
                                xdr_buf_pagecount(buf),
                                want + buf->page_base,
                                seek + buf->page_base);
                if (ret <= 0)
                        goto sock_err;
                xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
                ret -= buf->page_base;
                offset += ret;
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
                        goto out;
                seek = 0;
        } else {
                seek -= want;
                offset += want;
        }

        want = min_t(size_t, count - offset, buf->tail[0].iov_len);
        if (seek < want) {
                ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
                if (ret <= 0)
                        goto sock_err;
                offset += ret;
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
                        goto out;
        } else if (offset < seek_init)
                offset = seek_init;
        ret = -EMSGSIZE;
out:
        *read = offset - seek_init;
        return ret;
sock_err:
        offset += seek;
        goto out;
}

static void
xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
{
        if (!transport->recv.copied) {
                if (buf->head[0].iov_len >= transport->recv.offset)
                        memcpy(buf->head[0].iov_base,
                                        &transport->recv.xid,
                                        transport->recv.offset);
                transport->recv.copied = transport->recv.offset;
        }
}

static bool
xs_read_stream_request_done(struct sock_xprt *transport)
{
        return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
}

static void
xs_read_stream_check_eor(struct sock_xprt *transport,
                struct msghdr *msg)
{
        if (xs_read_stream_request_done(transport))
                msg->msg_flags |= MSG_EOR;
}

static ssize_t
xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
                int flags, struct rpc_rqst *req)
{
        struct xdr_buf *buf = &req->rq_private_buf;
        size_t want, read;
        ssize_t ret;

        xs_read_header(transport, buf);

        want = transport->recv.len - transport->recv.offset;
        if (want != 0) {
                ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
                                transport->recv.copied + want,
                                transport->recv.copied,
                                &read);
                transport->recv.offset += read;
                transport->recv.copied += read;
        }

        if (transport->recv.offset == transport->recv.len)
                xs_read_stream_check_eor(transport, msg);

        if (want == 0)
                return 0;

        switch (ret) {
        default:
                break;
        case -EFAULT:
        case -EMSGSIZE:
                msg->msg_flags |= MSG_TRUNC;
                return read;
        case 0:
                return -ESHUTDOWN;
        }
        return ret < 0 ? ret : read;
}

static size_t
xs_read_stream_headersize(bool isfrag)
{
        if (isfrag)
                return sizeof(__be32);
        return 3 * sizeof(__be32);
}

static ssize_t
xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
                int flags, size_t want, size_t seek)
{
        struct kvec kvec = {
                .iov_base = &transport->recv.fraghdr,
                .iov_len = want,
        };
        return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
}
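/*
 * Worked example of RFC 1831 record marking: a final fragment of
 * 0x100 (256) bytes is announced by the header 0x80000100 -- the top
 * bit is RPC_LAST_STREAM_FRAGMENT, the low 31 bits are the fragment
 * length.  On a fresh record, xs_read_stream_headersize() asks for
 * three words so that the XID and call direction are read in along
 * with the marker.
 */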
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
        struct rpc_xprt *xprt = &transport->xprt;
        struct rpc_rqst *req;
        ssize_t ret;

        /* Is this transport associated with the backchannel? */
        if (!xprt->bc_serv)
                return -ESHUTDOWN;

        /* Look up and lock the request corresponding to the given XID */
        req = xprt_lookup_bc_request(xprt, transport->recv.xid);
        if (!req) {
                printk(KERN_WARNING "Callback slot table overflowed\n");
                return -ESHUTDOWN;
        }
        if (transport->recv.copied && !req->rq_private_buf.len)
                return -ESHUTDOWN;

        ret = xs_read_stream_request(transport, msg, flags, req);
        if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                xprt_complete_bc_request(req, transport->recv.copied);
        else
                req->rq_private_buf.len = transport->recv.copied;

        return ret;
}
#else /* CONFIG_SUNRPC_BACKCHANNEL */
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
        return -ESHUTDOWN;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

static ssize_t
xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
        struct rpc_xprt *xprt = &transport->xprt;
        struct rpc_rqst *req;
        ssize_t ret = 0;

        /* Look up and lock the request corresponding to the given XID */
        spin_lock(&xprt->queue_lock);
        req = xprt_lookup_rqst(xprt, transport->recv.xid);
        if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
                msg->msg_flags |= MSG_TRUNC;
                goto out;
        }
        xprt_pin_rqst(req);
        spin_unlock(&xprt->queue_lock);

        ret = xs_read_stream_request(transport, msg, flags, req);

        spin_lock(&xprt->queue_lock);
        if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                xprt_complete_rqst(req->rq_task, transport->recv.copied);
        else
                req->rq_private_buf.len = transport->recv.copied;
        xprt_unpin_rqst(req);
out:
        spin_unlock(&xprt->queue_lock);
        return ret;
}

static ssize_t
xs_read_stream(struct sock_xprt *transport, int flags)
{
        struct msghdr msg = { 0 };
        size_t want, read = 0;
        ssize_t ret = 0;

        if (transport->recv.len == 0) {
                want = xs_read_stream_headersize(transport->recv.copied != 0);
                ret = xs_read_stream_header(transport, &msg, flags, want,
                                transport->recv.offset);
                if (ret <= 0)
                        goto out_err;
                transport->recv.offset = ret;
                if (transport->recv.offset != want)
                        return transport->recv.offset;
                transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
                        RPC_FRAGMENT_SIZE_MASK;
                transport->recv.offset -= sizeof(transport->recv.fraghdr);
                read = ret;
        }

        switch (be32_to_cpu(transport->recv.calldir)) {
        default:
                msg.msg_flags |= MSG_TRUNC;
                break;
        case RPC_CALL:
                ret = xs_read_stream_call(transport, &msg, flags);
                break;
        case RPC_REPLY:
                ret = xs_read_stream_reply(transport, &msg, flags);
        }
        if (msg.msg_flags & MSG_TRUNC) {
                transport->recv.calldir = cpu_to_be32(-1);
                transport->recv.copied = -1;
        }
        if (ret < 0)
                goto out_err;
        read += ret;
        if (transport->recv.offset < transport->recv.len) {
                if (!(msg.msg_flags & MSG_TRUNC))
                        return read;
                msg.msg_flags = 0;
                ret = xs_read_discard(transport->sock, &msg, flags,
                                transport->recv.len - transport->recv.offset);
                if (ret <= 0)
                        goto out_err;
                transport->recv.offset += ret;
                read += ret;
                if (transport->recv.offset != transport->recv.len)
                        return read;
        }
        if (xs_read_stream_request_done(transport)) {
                trace_xs_stream_read_request(transport);
                transport->recv.copied = 0;
        }
        transport->recv.offset = 0;
        transport->recv.len = 0;
        return read;
out_err:
        return ret != 0 ? ret : -ESHUTDOWN;
}
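/*
 * Summary of the stream receive state used above: recv.len is the size
 * of the fragment currently being received, recv.offset counts how
 * much of it has been pulled off the socket, and recv.copied counts
 * the bytes landed in the request buffer across all fragments of the
 * record.  recv.offset and recv.len reset when a fragment completes;
 * recv.copied resets only once the last fragment is done.
 */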
static __poll_t xs_poll_socket(struct sock_xprt *transport)
{
        return transport->sock->ops->poll(transport->file, transport->sock,
                        NULL);
}

static bool xs_poll_socket_readable(struct sock_xprt *transport)
{
        __poll_t events = xs_poll_socket(transport);

        return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
}

static void xs_poll_check_readable(struct sock_xprt *transport)
{
        clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        if (!xs_poll_socket_readable(transport))
                return;
        if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
                queue_work(xprtiod_workqueue, &transport->recv_worker);
}

static void xs_stream_data_receive(struct sock_xprt *transport)
{
        size_t read = 0;
        ssize_t ret = 0;

        mutex_lock(&transport->recv_mutex);
        if (transport->sock == NULL)
                goto out;
        for (;;) {
                ret = xs_read_stream(transport, MSG_DONTWAIT);
                if (ret < 0)
                        break;
                read += ret;
                cond_resched();
        }
        if (ret == -ESHUTDOWN)
                kernel_sock_shutdown(transport->sock, SHUT_RDWR);
        else
                xs_poll_check_readable(transport);
out:
        mutex_unlock(&transport->recv_mutex);
        trace_xs_stream_read_data(&transport->xprt, ret, read);
}

static void xs_stream_data_receive_workfn(struct work_struct *work)
{
        struct sock_xprt *transport =
                container_of(work, struct sock_xprt, recv_worker);
        unsigned int pflags = memalloc_nofs_save();

        xs_stream_data_receive(transport);
        memalloc_nofs_restore(pflags);
}

static void
xs_stream_reset_connect(struct sock_xprt *transport)
{
        transport->recv.offset = 0;
        transport->recv.len = 0;
        transport->recv.copied = 0;
        transport->xmit.offset = 0;
}

static void
xs_stream_start_connect(struct sock_xprt *transport)
{
        transport->xprt.stat.connect_count++;
        transport->xprt.stat.connect_start = jiffies;
}

#define XS_SENDMSG_FLAGS        (MSG_DONTWAIT | MSG_NOSIGNAL)

/**
 * xs_nospace - handle transmit was incomplete
 * @req: pointer to RPC request
 *
 */
static int xs_nospace(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct sock *sk = transport->inet;
        int ret = -EAGAIN;

        trace_rpc_socket_nospace(req, transport);

        /* Protect against races with write_space */
        spin_lock(&xprt->transport_lock);

        /* Don't race with disconnect */
        if (xprt_connected(xprt)) {
                /* wait for more buffer space */
                sk->sk_write_pending++;
                xprt_wait_for_buffer_space(xprt);
        } else
                ret = -ENOTCONN;

        spin_unlock(&xprt->transport_lock);

        /* Race breaker in case memory is freed before above code is called */
        if (ret == -EAGAIN) {
                struct socket_wq *wq;

                rcu_read_lock();
                wq = rcu_dereference(sk->sk_wq);
                set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
                rcu_read_unlock();

                sk->sk_write_space(sk);
        }
        return ret;
}

static void
xs_stream_prepare_request(struct rpc_rqst *req)
{
        xdr_free_bvec(&req->rq_rcv_buf);
        req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
}

/*
 * Determine if the previous message in the stream was aborted before it
 * could complete transmission.
 */
static bool
xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
{
        return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
}

/*
 * Return the stream record marker field for a record of length < 2^31-1
 */
static rpc_fraghdr
xs_stream_record_marker(struct xdr_buf *xdr)
{
        if (!xdr->len)
                return 0;
        return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
}

/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport =
                container_of(xprt, struct sock_xprt, xprt);
        struct xdr_buf *xdr = &req->rq_snd_buf;
        rpc_fraghdr rm = xs_stream_record_marker(xdr);
        unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
        struct msghdr msg = {
                .msg_flags      = XS_SENDMSG_FLAGS,
        };
        unsigned int sent;
        int status;

        /* Close the stream if the previous transmission was incomplete */
        if (xs_send_request_was_aborted(transport, req)) {
                xs_close(xprt);
                return -ENOTCONN;
        }

        xs_pktdump("packet data:",
                        req->rq_svec->iov_base, req->rq_svec->iov_len);

        req->rq_xtime = ktime_get();
        status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
                                   transport->xmit.offset, rm, &sent);
        dprintk("RPC: %s(%u) = %d\n",
                        __func__, xdr->len - transport->xmit.offset, status);

        if (status == -EAGAIN && sock_writeable(transport->inet))
                status = -ENOBUFS;

        if (likely(sent > 0) || status == 0) {
                transport->xmit.offset += sent;
                req->rq_bytes_sent = transport->xmit.offset;
                if (likely(req->rq_bytes_sent >= msglen)) {
                        req->rq_xmit_bytes_sent += transport->xmit.offset;
                        transport->xmit.offset = 0;
                        return 0;
                }
                status = -EAGAIN;
        }

        switch (status) {
        case -ENOBUFS:
                break;
        case -EAGAIN:
                status = xs_nospace(req);
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
                fallthrough;
        case -EPIPE:
                xs_close(xprt);
                status = -ENOTCONN;
        }

        return status;
}
/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct xdr_buf *xdr = &req->rq_snd_buf;
        struct msghdr msg = {
                .msg_name       = xs_addr(xprt),
                .msg_namelen    = xprt->addrlen,
                .msg_flags      = XS_SENDMSG_FLAGS,
        };
        unsigned int sent;
        int status;

        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
                                req->rq_svec->iov_len);

        if (!xprt_bound(xprt))
                return -ENOTCONN;

        if (!xprt_request_get_cong(xprt, req))
                return -EBADSLT;

        req->rq_xtime = ktime_get();
        status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);

        dprintk("RPC: xs_udp_send_request(%u) = %d\n",
                        xdr->len, status);

        /* firewall is blocking us, don't return -EAGAIN or we end up looping */
        if (status == -EPERM)
                goto process_status;

        if (status == -EAGAIN && sock_writeable(transport->inet))
                status = -ENOBUFS;

        if (sent > 0 || status == 0) {
                req->rq_xmit_bytes_sent += sent;
                if (sent >= req->rq_slen)
                        return 0;
                /* Still some bytes left; set up for a retry later. */
                status = -EAGAIN;
        }

process_status:
        switch (status) {
        case -ENOTSOCK:
                status = -ENOTCONN;
                /* Should we call xs_close() here? */
                break;
        case -EAGAIN:
                status = xs_nospace(req);
                break;
        case -ENETUNREACH:
        case -ENOBUFS:
        case -EPIPE:
        case -ECONNREFUSED:
        case -EPERM:
                /* When the server has died, an ICMP port unreachable message
                 * prompts ECONNREFUSED. */
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
        }

        return status;
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *      if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct xdr_buf *xdr = &req->rq_snd_buf;
        rpc_fraghdr rm = xs_stream_record_marker(xdr);
        unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
        struct msghdr msg = {
                .msg_flags      = XS_SENDMSG_FLAGS,
        };
        bool vm_wait = false;
        unsigned int sent;
        int status;

        /* Close the stream if the previous transmission was incomplete */
        if (xs_send_request_was_aborted(transport, req)) {
                if (transport->sock != NULL)
                        kernel_sock_shutdown(transport->sock, SHUT_RDWR);
                return -ENOTCONN;
        }
        if (!transport->inet)
                return -ENOTCONN;

        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
                                req->rq_svec->iov_len);

        if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
                xs_tcp_set_socket_timeouts(xprt, transport->sock);

        /* Continue transmitting the packet/record. We must be careful
         * to cope with writespace callbacks arriving _after_ we have
         * called sendmsg(). */
        req->rq_xtime = ktime_get();
        tcp_sock_set_cork(transport->inet, true);
        while (1) {
                status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
                                           transport->xmit.offset, rm, &sent);

                dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
                                xdr->len - transport->xmit.offset, status);

                /* If we've sent the entire packet, immediately
                 * reset the count of bytes sent. */
                transport->xmit.offset += sent;
                req->rq_bytes_sent = transport->xmit.offset;
                if (likely(req->rq_bytes_sent >= msglen)) {
                        req->rq_xmit_bytes_sent += transport->xmit.offset;
                        transport->xmit.offset = 0;
                        if (atomic_long_read(&xprt->xmit_queuelen) == 1)
                                tcp_sock_set_cork(transport->inet, false);
                        return 0;
                }

                WARN_ON_ONCE(sent == 0 && status == 0);

                if (status == -EAGAIN) {
                        /*
                         * Return EAGAIN if we're sure we're hitting the
                         * socket send buffer limits.
                         */
                        if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
                                break;
                        /*
                         * Did we hit a memory allocation failure?
                         */
                        if (sent == 0) {
                                status = -ENOBUFS;
                                if (vm_wait)
                                        break;
                                /* Retry, knowing now that we're below the
                                 * socket send buffer limit
                                 */
                                vm_wait = true;
                        }
                        continue;
                }
                if (status < 0)
                        break;
                vm_wait = false;
        }

        switch (status) {
        case -ENOTSOCK:
                status = -ENOTCONN;
                /* Should we call xs_close() here? */
                break;
        case -EAGAIN:
                status = xs_nospace(req);
                break;
        case -ECONNRESET:
        case -ECONNREFUSED:
        case -ENOTCONN:
        case -EADDRINUSE:
        case -ENOBUFS:
        case -EPIPE:
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
        }

        return status;
}

static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
        transport->old_data_ready = sk->sk_data_ready;
        transport->old_state_change = sk->sk_state_change;
        transport->old_write_space = sk->sk_write_space;
        transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
        sk->sk_data_ready = transport->old_data_ready;
        sk->sk_state_change = transport->old_state_change;
        sk->sk_write_space = transport->old_write_space;
        sk->sk_error_report = transport->old_error_report;
}

static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
{
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

        clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
        clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
        clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
}

static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
{
        set_bit(nr, &transport->sock_state);
        queue_work(xprtiod_workqueue, &transport->error_worker);
}

static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
        xprt->connect_cookie++;
        smp_mb__before_atomic();
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
        xs_sock_reset_state_flags(xprt);
        smp_mb__after_atomic();
}

/**
 * xs_error_report - callback to handle TCP socket state errors
 * @sk: socket
 *
 * Note: we don't call sock_error() since there may be an rpc_task
 * using the socket, and so we don't want to clear sk->sk_err.
 */
static void xs_error_report(struct sock *sk)
{
        struct sock_xprt *transport;
        struct rpc_xprt *xprt;

        if (!(xprt = xprt_from_sock(sk)))
                return;

        transport = container_of(xprt, struct sock_xprt, xprt);
        transport->xprt_err = -sk->sk_err;
        if (transport->xprt_err == 0)
                return;
        dprintk("RPC: xs_error_report client %p, error=%d...\n",
                        xprt, -transport->xprt_err);
        trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);

        /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
        smp_mb__before_atomic();
        xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
}

static void xs_reset_transport(struct sock_xprt *transport)
{
        struct socket *sock = transport->sock;
        struct sock *sk = transport->inet;
        struct rpc_xprt *xprt = &transport->xprt;
        struct file *filp = transport->file;

        if (sk == NULL)
                return;

        if (atomic_read(&transport->xprt.swapper))
                sk_clear_memalloc(sk);

        kernel_sock_shutdown(sock, SHUT_RDWR);

        mutex_lock(&transport->recv_mutex);
        lock_sock(sk);
        transport->inet = NULL;
        transport->sock = NULL;
        transport->file = NULL;

        sk->sk_user_data = NULL;

        xs_restore_old_callbacks(transport, sk);
        xprt_clear_connected(xprt);
        xs_sock_reset_connection_flags(xprt);
        /* Reset stream record info */
        xs_stream_reset_connect(transport);
        release_sock(sk);
        mutex_unlock(&transport->recv_mutex);

        trace_rpc_socket_close(xprt, sock);
        fput(filp);

        xprt_disconnect_done(xprt);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
 *
 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 * xs_reset_transport() zeroing the socket from underneath a writer.
 */
static void xs_close(struct rpc_xprt *xprt)
{
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

        dprintk("RPC: xs_close xprt %p\n", xprt);

        xs_reset_transport(transport);
        xprt->reestablish_timeout = 0;
}

static void xs_inject_disconnect(struct rpc_xprt *xprt)
{
        dprintk("RPC: injecting transport disconnect on xprt=%p\n",
                xprt);
        xprt_disconnect_done(xprt);
}

static void xs_xprt_free(struct rpc_xprt *xprt)
{
        xs_free_peer_addresses(xprt);
        xprt_free(xprt);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
        struct sock_xprt *transport = container_of(xprt,
                        struct sock_xprt, xprt);
        dprintk("RPC: xs_destroy xprt %p\n", xprt);

        cancel_delayed_work_sync(&transport->connect_worker);
        xs_close(xprt);
        cancel_work_sync(&transport->recv_worker);
        cancel_work_sync(&transport->error_worker);
        xs_xprt_free(xprt);
        module_put(THIS_MODULE);
}

/**
 * xs_udp_data_read_skb - receive callback for UDP sockets
 * @xprt: transport
 * @sk: socket
 * @skb: skbuff
 *
 */
static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
                struct sock *sk,
                struct sk_buff *skb)
{
        struct rpc_task *task;
        struct rpc_rqst *rovr;
        int repsize, copied;
        u32 _xid;
        __be32 *xp;

        repsize = skb->len;
        if (repsize < 4) {
                dprintk("RPC: impossible RPC reply size %d!\n", repsize);
                return;
        }

        /* Copy the XID from the skb... */
        xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
        if (xp == NULL)
                return;

        /* Look up and lock the request corresponding to the given XID */
        spin_lock(&xprt->queue_lock);
        rovr = xprt_lookup_rqst(xprt, *xp);
        if (!rovr)
                goto out_unlock;
        xprt_pin_rqst(rovr);
        xprt_update_rtt(rovr->rq_task);
        spin_unlock(&xprt->queue_lock);
        task = rovr->rq_task;

        if ((copied = rovr->rq_private_buf.buflen) > repsize)
                copied = repsize;

        /* Suck it into the iovec, verify checksum if not done by hw. */
        if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
                spin_lock(&xprt->queue_lock);
                __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
                goto out_unpin;
        }

        spin_lock(&xprt->transport_lock);
        xprt_adjust_cwnd(xprt, task, copied);
        spin_unlock(&xprt->transport_lock);
        spin_lock(&xprt->queue_lock);
        xprt_complete_rqst(task, copied);
        __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
out_unpin:
        xprt_unpin_rqst(rovr);
out_unlock:
        spin_unlock(&xprt->queue_lock);
}

static void xs_udp_data_receive(struct sock_xprt *transport)
{
        struct sk_buff *skb;
        struct sock *sk;
        int err;

        mutex_lock(&transport->recv_mutex);
        sk = transport->inet;
        if (sk == NULL)
                goto out;
        for (;;) {
                skb = skb_recv_udp(sk, 0, 1, &err);
                if (skb == NULL)
                        break;
                xs_udp_data_read_skb(&transport->xprt, sk, skb);
                consume_skb(skb);
                cond_resched();
        }
        xs_poll_check_readable(transport);
out:
        mutex_unlock(&transport->recv_mutex);
}

static void xs_udp_data_receive_workfn(struct work_struct *work)
{
        struct sock_xprt *transport =
                container_of(work, struct sock_xprt, recv_worker);
        unsigned int pflags = memalloc_nofs_save();

        xs_udp_data_receive(transport);
        memalloc_nofs_restore(pflags);
}

/**
 * xs_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 *
 */
static void xs_data_ready(struct sock *sk)
{
        struct rpc_xprt *xprt;

        dprintk("RPC: xs_data_ready...\n");
        xprt = xprt_from_sock(sk);
        if (xprt != NULL) {
                struct sock_xprt *transport = container_of(xprt,
                                struct sock_xprt, xprt);
                transport->old_data_ready(sk);
                /* Any data means we had a useful conversation, so
                 * then we don't need to delay the next reconnect
                 */
                if (xprt->reestablish_timeout)
                        xprt->reestablish_timeout = 0;
                if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
                        queue_work(xprtiod_workqueue, &transport->recv_worker);
        }
}

/*
 * Helper function to force a TCP close if the server is sending
 * junk and/or it has put us in CLOSE_WAIT
 */
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
        xprt_force_disconnect(xprt);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
{
        return PAGE_SIZE;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
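/*
 * The state-change callback below tracks the TCP state machine on
 * behalf of the transport: ESTABLISHED marks the xprt connected,
 * FIN_WAIT1/CLOSE_WAIT/CLOSING/LAST_ACK drive the orderly-shutdown
 * bookkeeping, and CLOSE finally releases the socket.
 */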
/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
        struct rpc_xprt *xprt;
        struct sock_xprt *transport;

        if (!(xprt = xprt_from_sock(sk)))
                return;
        dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
        dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
                        sk->sk_state, xprt_connected(xprt),
                        sock_flag(sk, SOCK_DEAD),
                        sock_flag(sk, SOCK_ZAPPED),
                        sk->sk_shutdown);

        transport = container_of(xprt, struct sock_xprt, xprt);
        trace_rpc_socket_state_change(xprt, sk->sk_socket);
        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
                if (!xprt_test_and_set_connected(xprt)) {
                        xprt->connect_cookie++;
                        clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
                        xprt_clear_connecting(xprt);

                        xprt->stat.connect_count++;
                        xprt->stat.connect_time += (long)jiffies -
                                                   xprt->stat.connect_start;
                        xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
                }
                break;
        case TCP_FIN_WAIT1:
                /* The client initiated a shutdown of the socket */
                xprt->connect_cookie++;
                xprt->reestablish_timeout = 0;
                set_bit(XPRT_CLOSING, &xprt->state);
                smp_mb__before_atomic();
                clear_bit(XPRT_CONNECTED, &xprt->state);
                clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
                smp_mb__after_atomic();
                break;
        case TCP_CLOSE_WAIT:
                /* The server initiated a shutdown of the socket */
                xprt->connect_cookie++;
                clear_bit(XPRT_CONNECTED, &xprt->state);
                xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
                fallthrough;
        case TCP_CLOSING:
                /*
                 * If the server closed down the connection, make sure that
                 * we back off before reconnecting
                 */
                if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
                break;
        case TCP_LAST_ACK:
                set_bit(XPRT_CLOSING, &xprt->state);
                smp_mb__before_atomic();
                clear_bit(XPRT_CONNECTED, &xprt->state);
                smp_mb__after_atomic();
                break;
        case TCP_CLOSE:
                if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
                                        &transport->sock_state))
                        xprt_clear_connecting(xprt);
                clear_bit(XPRT_CLOSING, &xprt->state);
                /* Trigger the socket release */
                xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
        }
}

static void xs_write_space(struct sock *sk)
{
        struct socket_wq *wq;
        struct sock_xprt *transport;
        struct rpc_xprt *xprt;

        if (!sk->sk_socket)
                return;
        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

        if (unlikely(!(xprt = xprt_from_sock(sk))))
                return;
        transport = container_of(xprt, struct sock_xprt, xprt);
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
                goto out;

        xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
        sk->sk_write_pending--;
out:
        rcu_read_unlock();
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk))
                xs_write_space(sk);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_is_writeable(sk))
                xs_write_space(sk);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct sock *sk = transport->inet;

        if (transport->rcvsize) {
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
        }
        if (transport->sndsize) {
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
                sk->sk_write_space(sk);
        }
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

        transport->sndsize = 0;
        if (sndsize)
                transport->sndsize = sndsize + 1024;
        transport->rcvsize = 0;
        if (rcvsize)
                transport->rcvsize = rcvsize + 1024;

        xs_udp_do_set_buffer_size(xprt);
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @xprt: controlling transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
        spin_lock(&xprt->transport_lock);
        xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
        spin_unlock(&xprt->transport_lock);
}

static int xs_get_random_port(void)
{
        unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
        unsigned short range;
        unsigned short rand;

        if (max < min)
                return -EADDRINUSE;
        range = max - min + 1;
        rand = (unsigned short) prandom_u32() % range;
        return rand + min;
}

static unsigned short xs_sock_getport(struct socket *sock)
{
        struct sockaddr_storage buf;
        unsigned short port = 0;

        if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
                goto out;
        switch (buf.ss_family) {
        case AF_INET6:
                port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
                break;
        case AF_INET:
                port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
        }
out:
        return port;
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
        dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

        rpc_set_port(xs_addr(xprt), port);
        xs_update_peer_port(xprt);
}

static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
{
        if (transport->srcport == 0 && transport->xprt.reuseport)
                transport->srcport = xs_sock_getport(sock);
}

static int xs_get_srcport(struct sock_xprt *transport)
{
        int port = transport->srcport;

        if (port == 0 && transport->xprt.resvport)
                port = xs_get_random_port();
        return port;
}
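/*
 * Illustrative source-port search: if xs_get_random_port() picks,
 * say, 703, xs_bind() below tries 703, 702, 701, ... on EADDRINUSE,
 * wraps from xprt_min_resvport back up to xprt_max_resvport, and
 * gives up after the second wrap (nloop == 2).
 */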
unsigned short get_srcport(struct rpc_xprt *xprt)
{
        struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
        return xs_sock_getport(sock->sock);
}
EXPORT_SYMBOL(get_srcport);

static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
        if (transport->srcport != 0)
                transport->srcport = 0;
        if (!transport->xprt.resvport)
                return 0;
        if (port <= xprt_min_resvport || port > xprt_max_resvport)
                return xprt_max_resvport;
        return --port;
}

static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
        struct sockaddr_storage myaddr;
        int err, nloop = 0;
        int port = xs_get_srcport(transport);
        unsigned short last;

        /*
         * If we are asking for any ephemeral port (i.e. port == 0 &&
         * transport->xprt.resvport == 0), don't bind.  Let the local
         * port selection happen implicitly when the socket is used
         * (for example at connect time).
         *
         * This ensures that we can continue to establish TCP
         * connections even when all local ephemeral ports are already
         * a part of some TCP connection.  This makes no difference
         * for UDP sockets, but also doesn't harm them.
         *
         * If we're asking for any reserved port (i.e. port == 0 &&
         * transport->xprt.resvport == 1) xs_get_srcport above will
         * ensure that port is non-zero and we will bind as needed.
         */
        if (port <= 0)
                return port;

        memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
        do {
                rpc_set_port((struct sockaddr *)&myaddr, port);
                err = kernel_bind(sock, (struct sockaddr *)&myaddr,
                                transport->xprt.addrlen);
                if (err == 0) {
                        if (transport->xprt.reuseport)
                                transport->srcport = port;
                        break;
                }
                last = port;
                port = xs_next_srcport(transport, port);
                if (port > last)
                        nloop++;
        } while (err == -EADDRINUSE && nloop != 2);

        if (myaddr.ss_family == AF_INET)
                dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
                                &((struct sockaddr_in *)&myaddr)->sin_addr,
                                port, err ? "failed" : "ok", err);
        else
                dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
                                &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
                                port, err ? "failed" : "ok", err);
        return err;
}
"failed" : "ok", err); 1707 return err; 1708 } 1709 1710 /* 1711 * We don't support autobind on AF_LOCAL sockets 1712 */ 1713 static void xs_local_rpcbind(struct rpc_task *task) 1714 { 1715 xprt_set_bound(task->tk_xprt); 1716 } 1717 1718 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) 1719 { 1720 } 1721 1722 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1723 static struct lock_class_key xs_key[2]; 1724 static struct lock_class_key xs_slock_key[2]; 1725 1726 static inline void xs_reclassify_socketu(struct socket *sock) 1727 { 1728 struct sock *sk = sock->sk; 1729 1730 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", 1731 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]); 1732 } 1733 1734 static inline void xs_reclassify_socket4(struct socket *sock) 1735 { 1736 struct sock *sk = sock->sk; 1737 1738 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1739 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]); 1740 } 1741 1742 static inline void xs_reclassify_socket6(struct socket *sock) 1743 { 1744 struct sock *sk = sock->sk; 1745 1746 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1747 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]); 1748 } 1749 1750 static inline void xs_reclassify_socket(int family, struct socket *sock) 1751 { 1752 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk))) 1753 return; 1754 1755 switch (family) { 1756 case AF_LOCAL: 1757 xs_reclassify_socketu(sock); 1758 break; 1759 case AF_INET: 1760 xs_reclassify_socket4(sock); 1761 break; 1762 case AF_INET6: 1763 xs_reclassify_socket6(sock); 1764 break; 1765 } 1766 } 1767 #else 1768 static inline void xs_reclassify_socket(int family, struct socket *sock) 1769 { 1770 } 1771 #endif 1772 1773 static void xs_dummy_setup_socket(struct work_struct *work) 1774 { 1775 } 1776 1777 static struct socket *xs_create_sock(struct rpc_xprt *xprt, 1778 struct sock_xprt *transport, int family, int type, 1779 int protocol, bool reuseport) 1780 { 1781 struct file *filp; 1782 struct socket *sock; 1783 int err; 1784 1785 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); 1786 if (err < 0) { 1787 dprintk("RPC: can't create %d transport socket (%d).\n", 1788 protocol, -err); 1789 goto out; 1790 } 1791 xs_reclassify_socket(family, sock); 1792 1793 if (reuseport) 1794 sock_set_reuseport(sock->sk); 1795 1796 err = xs_bind(transport, sock); 1797 if (err) { 1798 sock_release(sock); 1799 goto out; 1800 } 1801 1802 filp = sock_alloc_file(sock, O_NONBLOCK, NULL); 1803 if (IS_ERR(filp)) 1804 return ERR_CAST(filp); 1805 transport->file = filp; 1806 1807 return sock; 1808 out: 1809 return ERR_PTR(err); 1810 } 1811 1812 static int xs_local_finish_connecting(struct rpc_xprt *xprt, 1813 struct socket *sock) 1814 { 1815 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 1816 xprt); 1817 1818 if (!transport->inet) { 1819 struct sock *sk = sock->sk; 1820 1821 lock_sock(sk); 1822 1823 xs_save_old_callbacks(transport, sk); 1824 1825 sk->sk_user_data = xprt; 1826 sk->sk_data_ready = xs_data_ready; 1827 sk->sk_write_space = xs_udp_write_space; 1828 sock_set_flag(sk, SOCK_FASYNC); 1829 sk->sk_error_report = xs_error_report; 1830 1831 xprt_clear_connected(xprt); 1832 1833 /* Reset to new socket */ 1834 transport->sock = sock; 1835 transport->inet = sk; 1836 1837 release_sock(sk); 1838 } 1839 1840 xs_stream_start_connect(transport); 1841 1842 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); 1843 } 1844 1845 /** 1846 * xs_local_setup_socket - create AF_LOCAL socket, connect to a 
/**
 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
 * @transport: socket transport to connect
 */
static int xs_local_setup_socket(struct sock_xprt *transport)
{
        struct rpc_xprt *xprt = &transport->xprt;
        struct file *filp;
        struct socket *sock;
        int status;

        status = __sock_create(xprt->xprt_net, AF_LOCAL,
                                        SOCK_STREAM, 0, &sock, 1);
        if (status < 0) {
                dprintk("RPC: can't create AF_LOCAL "
                        "transport socket (%d).\n", -status);
                goto out;
        }
        xs_reclassify_socket(AF_LOCAL, sock);

        filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
        if (IS_ERR(filp)) {
                status = PTR_ERR(filp);
                goto out;
        }
        transport->file = filp;

        dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
                        xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);

        status = xs_local_finish_connecting(xprt, sock);
        trace_rpc_socket_connect(xprt, sock, status);
        switch (status) {
        case 0:
                dprintk("RPC: xprt %p connected to %s\n",
                                xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies -
                                           xprt->stat.connect_start;
                xprt_set_connected(xprt);
                break;
        case -ENOBUFS:
                break;
        case -ENOENT:
                dprintk("RPC: xprt %p: socket %s does not exist\n",
                                xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
                break;
        case -ECONNREFUSED:
                dprintk("RPC: xprt %p: connection refused for %s\n",
                                xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
                break;
        default:
                printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
                                __func__, -status,
                                xprt->address_strings[RPC_DISPLAY_ADDR]);
        }

out:
        xprt_clear_connecting(xprt);
        xprt_wake_pending_tasks(xprt, status);
        return status;
}

static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        int ret;

        if (RPC_IS_ASYNC(task)) {
                /*
                 * We want the AF_LOCAL connect to be resolved in the
                 * filesystem namespace of the process making the rpc
                 * call.  Thus we connect synchronously.
                 *
                 * If we want to support asynchronous AF_LOCAL calls,
                 * we'll need to figure out how to pass a namespace to
                 * connect.
                 */
                task->tk_rpc_status = -ENOTCONN;
                rpc_exit(task, -ENOTCONN);
                return;
        }
        ret = xs_local_setup_socket(transport);
        if (ret && !RPC_IS_SOFTCONN(task))
                msleep_interruptible(15000);
}

#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
/*
 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
 * know that we have exclusive access to the socket), to guard against
 * races with xs_reset_transport.
 */
static void xs_set_memalloc(struct rpc_xprt *xprt)
{
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
                        xprt);

        /*
         * If there's no sock, then we have nothing to set.  The
         * reconnecting process will get it for us.
         */
        if (!transport->inet)
                return;
        if (atomic_read(&xprt->swapper))
                sk_set_memalloc(transport->inet);
}

/**
 * xs_enable_swap - Tag this transport as being used for swap.
 * @xprt: transport to tag
 *
 * Take a reference to this transport on behalf of the rpc_clnt, and
 * optionally mark it for swapping if it wasn't already.
 */
1959 */ 1960 static int 1961 xs_enable_swap(struct rpc_xprt *xprt) 1962 { 1963 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 1964 1965 if (atomic_inc_return(&xprt->swapper) != 1) 1966 return 0; 1967 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 1968 return -ERESTARTSYS; 1969 if (xs->inet) 1970 sk_set_memalloc(xs->inet); 1971 xprt_release_xprt(xprt, NULL); 1972 return 0; 1973 } 1974 1975 /** 1976 * xs_disable_swap - Untag this transport as being used for swap. 1977 * @xprt: transport to untag 1978 * 1979 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the 1980 * swapper refcount goes to 0, untag the socket as a memalloc socket. 1981 */ 1982 static void 1983 xs_disable_swap(struct rpc_xprt *xprt) 1984 { 1985 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 1986 1987 if (!atomic_dec_and_test(&xprt->swapper)) 1988 return; 1989 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 1990 return; 1991 if (xs->inet) 1992 sk_clear_memalloc(xs->inet); 1993 xprt_release_xprt(xprt, NULL); 1994 } 1995 #else 1996 static void xs_set_memalloc(struct rpc_xprt *xprt) 1997 { 1998 } 1999 2000 static int 2001 xs_enable_swap(struct rpc_xprt *xprt) 2002 { 2003 return -EINVAL; 2004 } 2005 2006 static void 2007 xs_disable_swap(struct rpc_xprt *xprt) 2008 { 2009 } 2010 #endif 2011 2012 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2013 { 2014 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2015 2016 if (!transport->inet) { 2017 struct sock *sk = sock->sk; 2018 2019 lock_sock(sk); 2020 2021 xs_save_old_callbacks(transport, sk); 2022 2023 sk->sk_user_data = xprt; 2024 sk->sk_data_ready = xs_data_ready; 2025 sk->sk_write_space = xs_udp_write_space; 2026 sock_set_flag(sk, SOCK_FASYNC); 2027 2028 xprt_set_connected(xprt); 2029 2030 /* Reset to new socket */ 2031 transport->sock = sock; 2032 transport->inet = sk; 2033 2034 xs_set_memalloc(xprt); 2035 2036 release_sock(sk); 2037 } 2038 xs_udp_do_set_buffer_size(xprt); 2039 2040 xprt->stat.connect_start = jiffies; 2041 } 2042 2043 static void xs_udp_setup_socket(struct work_struct *work) 2044 { 2045 struct sock_xprt *transport = 2046 container_of(work, struct sock_xprt, connect_worker.work); 2047 struct rpc_xprt *xprt = &transport->xprt; 2048 struct socket *sock; 2049 int status = -EIO; 2050 2051 sock = xs_create_sock(xprt, transport, 2052 xs_addr(xprt)->sa_family, SOCK_DGRAM, 2053 IPPROTO_UDP, false); 2054 if (IS_ERR(sock)) 2055 goto out; 2056 2057 dprintk("RPC: worker connecting xprt %p via %s to " 2058 "%s (port %s)\n", xprt, 2059 xprt->address_strings[RPC_DISPLAY_PROTO], 2060 xprt->address_strings[RPC_DISPLAY_ADDR], 2061 xprt->address_strings[RPC_DISPLAY_PORT]); 2062 2063 xs_udp_finish_connecting(xprt, sock); 2064 trace_rpc_socket_connect(xprt, sock, 0); 2065 status = 0; 2066 out: 2067 xprt_clear_connecting(xprt); 2068 xprt_unlock_connect(xprt, transport); 2069 xprt_wake_pending_tasks(xprt, status); 2070 } 2071 2072 /** 2073 * xs_tcp_shutdown - gracefully shut down a TCP socket 2074 * @xprt: transport 2075 * 2076 * Initiates a graceful shutdown of the TCP socket by calling the 2077 * equivalent of shutdown(SHUT_RDWR); 2078 */ 2079 static void xs_tcp_shutdown(struct rpc_xprt *xprt) 2080 { 2081 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2082 struct socket *sock = transport->sock; 2083 int skst = transport->inet ?
transport->inet->sk_state : TCP_CLOSE; 2084 2085 if (sock == NULL) 2086 return; 2087 if (!xprt->reuseport) { 2088 xs_close(xprt); 2089 return; 2090 } 2091 switch (skst) { 2092 case TCP_FIN_WAIT1: 2093 case TCP_FIN_WAIT2: 2094 break; 2095 case TCP_ESTABLISHED: 2096 case TCP_CLOSE_WAIT: 2097 kernel_sock_shutdown(sock, SHUT_RDWR); 2098 trace_rpc_socket_shutdown(xprt, sock); 2099 break; 2100 default: 2101 xs_reset_transport(transport); 2102 } 2103 } 2104 2105 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, 2106 struct socket *sock) 2107 { 2108 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2109 unsigned int keepidle; 2110 unsigned int keepcnt; 2111 unsigned int timeo; 2112 2113 spin_lock(&xprt->transport_lock); 2114 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); 2115 keepcnt = xprt->timeout->to_retries + 1; 2116 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2117 (xprt->timeout->to_retries + 1); 2118 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2119 spin_unlock(&xprt->transport_lock); 2120 2121 /* TCP Keepalive options */ 2122 sock_set_keepalive(sock->sk); 2123 tcp_sock_set_keepidle(sock->sk, keepidle); 2124 tcp_sock_set_keepintvl(sock->sk, keepidle); 2125 tcp_sock_set_keepcnt(sock->sk, keepcnt); 2126 2127 /* TCP user timeout (see RFC5482) */ 2128 tcp_sock_set_user_timeout(sock->sk, timeo); 2129 } 2130 2131 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, 2132 unsigned long connect_timeout, 2133 unsigned long reconnect_timeout) 2134 { 2135 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2136 struct rpc_timeout to; 2137 unsigned long initval; 2138 2139 spin_lock(&xprt->transport_lock); 2140 if (reconnect_timeout < xprt->max_reconnect_timeout) 2141 xprt->max_reconnect_timeout = reconnect_timeout; 2142 if (connect_timeout < xprt->connect_timeout) { 2143 memcpy(&to, xprt->timeout, sizeof(to)); 2144 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1); 2145 /* Arbitrary lower limit */ 2146 if (initval < XS_TCP_INIT_REEST_TO << 1) 2147 initval = XS_TCP_INIT_REEST_TO << 1; 2148 to.to_initval = initval; 2149 to.to_maxval = initval; 2150 memcpy(&transport->tcp_timeout, &to, 2151 sizeof(transport->tcp_timeout)); 2152 xprt->timeout = &transport->tcp_timeout; 2153 xprt->connect_timeout = connect_timeout; 2154 } 2155 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2156 spin_unlock(&xprt->transport_lock); 2157 } 2158 2159 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2160 { 2161 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2162 2163 if (!transport->inet) { 2164 struct sock *sk = sock->sk; 2165 2166 /* Avoid temporary addresses, they are bad for long-lived 2167 * connections such as NFS mounts. 2168 * RFC4941, section 3.6 suggests that: 2169 * Individual applications, which have specific 2170 * knowledge about the normal duration of connections, 2171 * MAY override this as appropriate.
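 *
 * The ip6_sock_set_addr_preferences() call below is the in-kernel
 * counterpart of the IPV6_ADDR_PREFERENCES socket option from
 * RFC 5014; a userspace client would express roughly the same
 * source-address policy as:
 *
 *	int val = IPV6_PREFER_SRC_PUBLIC;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_ADDR_PREFERENCES,
 *		   &val, sizeof(val));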
2172 */ 2173 if (xs_addr(xprt)->sa_family == PF_INET6) { 2174 ip6_sock_set_addr_preferences(sk, 2175 IPV6_PREFER_SRC_PUBLIC); 2176 } 2177 2178 xs_tcp_set_socket_timeouts(xprt, sock); 2179 tcp_sock_set_nodelay(sk); 2180 2181 lock_sock(sk); 2182 2183 xs_save_old_callbacks(transport, sk); 2184 2185 sk->sk_user_data = xprt; 2186 sk->sk_data_ready = xs_data_ready; 2187 sk->sk_state_change = xs_tcp_state_change; 2188 sk->sk_write_space = xs_tcp_write_space; 2189 sock_set_flag(sk, SOCK_FASYNC); 2190 sk->sk_error_report = xs_error_report; 2191 2192 /* socket options */ 2193 sock_reset_flag(sk, SOCK_LINGER); 2194 2195 xprt_clear_connected(xprt); 2196 2197 /* Reset to new socket */ 2198 transport->sock = sock; 2199 transport->inet = sk; 2200 2201 release_sock(sk); 2202 } 2203 2204 if (!xprt_bound(xprt)) 2205 return -ENOTCONN; 2206 2207 xs_set_memalloc(xprt); 2208 2209 xs_stream_start_connect(transport); 2210 2211 /* Tell the socket layer to start connecting... */ 2212 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 2213 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2214 } 2215 2216 /** 2217 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint 2218 * @work: queued work item 2219 * 2220 * Invoked by a work queue tasklet. 2221 */ 2222 static void xs_tcp_setup_socket(struct work_struct *work) 2223 { 2224 struct sock_xprt *transport = 2225 container_of(work, struct sock_xprt, connect_worker.work); 2226 struct socket *sock = transport->sock; 2227 struct rpc_xprt *xprt = &transport->xprt; 2228 int status; 2229 2230 if (!sock) { 2231 sock = xs_create_sock(xprt, transport, 2232 xs_addr(xprt)->sa_family, SOCK_STREAM, 2233 IPPROTO_TCP, true); 2234 if (IS_ERR(sock)) { 2235 xprt_wake_pending_tasks(xprt, PTR_ERR(sock)); 2236 goto out; 2237 } 2238 } 2239 2240 dprintk("RPC: worker connecting xprt %p via %s to " 2241 "%s (port %s)\n", xprt, 2242 xprt->address_strings[RPC_DISPLAY_PROTO], 2243 xprt->address_strings[RPC_DISPLAY_ADDR], 2244 xprt->address_strings[RPC_DISPLAY_PORT]); 2245 2246 status = xs_tcp_finish_connecting(xprt, sock); 2247 trace_rpc_socket_connect(xprt, sock, status); 2248 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 2249 xprt, -status, xprt_connected(xprt), 2250 sock->sk->sk_state); 2251 switch (status) { 2252 case 0: 2253 xs_set_srcport(transport, sock); 2254 fallthrough; 2255 case -EINPROGRESS: 2256 /* SYN_SENT! */ 2257 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2258 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2259 fallthrough; 2260 case -EALREADY: 2261 goto out_unlock; 2262 case -EADDRNOTAVAIL: 2263 /* Source port number is unavailable. Try a new one! */ 2264 transport->srcport = 0; 2265 status = -EAGAIN; 2266 break; 2267 case -EINVAL: 2268 /* Happens, for instance, if the user specified a link 2269 * local IPv6 address without a scope-id. 2270 */ 2271 case -ECONNREFUSED: 2272 case -ECONNRESET: 2273 case -ENETDOWN: 2274 case -ENETUNREACH: 2275 case -EHOSTUNREACH: 2276 case -EADDRINUSE: 2277 case -ENOBUFS: 2278 break; 2279 default: 2280 printk("%s: connect returned unhandled error %d\n", 2281 __func__, status); 2282 status = -EAGAIN; 2283 } 2284 2285 /* xs_tcp_force_close() wakes tasks with a fixed error code. 2286 * We need to wake them first to ensure the correct error code. 
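 * That is, the ordering below is deliberate: xprt_wake_pending_tasks()
 * must run with the connect status first, before xs_tcp_force_close()
 * wakes any remaining waiters with its own generic disconnect error.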
2287 */ 2288 xprt_wake_pending_tasks(xprt, status); 2289 xs_tcp_force_close(xprt); 2290 out: 2291 xprt_clear_connecting(xprt); 2292 out_unlock: 2293 xprt_unlock_connect(xprt, transport); 2294 } 2295 2296 /** 2297 * xs_connect - connect a socket to a remote endpoint 2298 * @xprt: pointer to transport structure 2299 * @task: address of RPC task that manages state of connect request 2300 * 2301 * TCP: If the remote end dropped the connection, delay reconnecting. 2302 * 2303 * UDP socket connects are synchronous, but we use a work queue anyway 2304 * to guarantee that even unprivileged user processes can set up a 2305 * socket on a privileged port. 2306 * 2307 * If a UDP socket connect fails, the delay behavior here prevents 2308 * retry floods (hard mounts). 2309 */ 2310 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2311 { 2312 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2313 unsigned long delay = 0; 2314 2315 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2316 2317 if (transport->sock != NULL && !xprt_connecting(xprt)) { 2318 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2319 "seconds\n", 2320 xprt, xprt->reestablish_timeout / HZ); 2321 2322 /* Start by resetting any existing state */ 2323 xs_reset_transport(transport); 2324 2325 delay = xprt_reconnect_delay(xprt); 2326 xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO); 2327 2328 } else 2329 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2330 2331 queue_delayed_work(xprtiod_workqueue, 2332 &transport->connect_worker, 2333 delay); 2334 } 2335 2336 static void xs_wake_disconnect(struct sock_xprt *transport) 2337 { 2338 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state)) 2339 xs_tcp_force_close(&transport->xprt); 2340 } 2341 2342 static void xs_wake_write(struct sock_xprt *transport) 2343 { 2344 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state)) 2345 xprt_write_space(&transport->xprt); 2346 } 2347 2348 static void xs_wake_error(struct sock_xprt *transport) 2349 { 2350 int sockerr; 2351 2352 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) 2353 return; 2354 mutex_lock(&transport->recv_mutex); 2355 if (transport->sock == NULL) 2356 goto out; 2357 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) 2358 goto out; 2359 sockerr = xchg(&transport->xprt_err, 0); 2360 if (sockerr < 0) 2361 xprt_wake_pending_tasks(&transport->xprt, sockerr); 2362 out: 2363 mutex_unlock(&transport->recv_mutex); 2364 } 2365 2366 static void xs_wake_pending(struct sock_xprt *transport) 2367 { 2368 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state)) 2369 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN); 2370 } 2371 2372 static void xs_error_handle(struct work_struct *work) 2373 { 2374 struct sock_xprt *transport = container_of(work, 2375 struct sock_xprt, error_worker); 2376 2377 xs_wake_disconnect(transport); 2378 xs_wake_write(transport); 2379 xs_wake_error(transport); 2380 xs_wake_pending(transport); 2381 } 2382 2383 /** 2384 * xs_local_print_stats - display AF_LOCAL socket-specific stats 2385 * @xprt: rpc_xprt struct containing statistics 2386 * @seq: output file 2387 * 2388 */ 2389 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2390 { 2391 long idle_time = 0; 2392 2393 if (xprt_connected(xprt)) 2394 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2395 2396 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2397 "%llu %llu %lu %llu %llu\n", 2398 
xprt->stat.bind_count, 2399 xprt->stat.connect_count, 2400 xprt->stat.connect_time / HZ, 2401 idle_time, 2402 xprt->stat.sends, 2403 xprt->stat.recvs, 2404 xprt->stat.bad_xids, 2405 xprt->stat.req_u, 2406 xprt->stat.bklog_u, 2407 xprt->stat.max_slots, 2408 xprt->stat.sending_u, 2409 xprt->stat.pending_u); 2410 } 2411 2412 /** 2413 * xs_udp_print_stats - display UDP socket-specific stats 2414 * @xprt: rpc_xprt struct containing statistics 2415 * @seq: output file 2416 * 2417 */ 2418 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2419 { 2420 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2421 2422 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu " 2423 "%lu %llu %llu\n", 2424 transport->srcport, 2425 xprt->stat.bind_count, 2426 xprt->stat.sends, 2427 xprt->stat.recvs, 2428 xprt->stat.bad_xids, 2429 xprt->stat.req_u, 2430 xprt->stat.bklog_u, 2431 xprt->stat.max_slots, 2432 xprt->stat.sending_u, 2433 xprt->stat.pending_u); 2434 } 2435 2436 /** 2437 * xs_tcp_print_stats - display TCP socket-specific stats 2438 * @xprt: rpc_xprt struct containing statistics 2439 * @seq: output file 2440 * 2441 */ 2442 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2443 { 2444 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2445 long idle_time = 0; 2446 2447 if (xprt_connected(xprt)) 2448 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2449 2450 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu " 2451 "%llu %llu %lu %llu %llu\n", 2452 transport->srcport, 2453 xprt->stat.bind_count, 2454 xprt->stat.connect_count, 2455 xprt->stat.connect_time / HZ, 2456 idle_time, 2457 xprt->stat.sends, 2458 xprt->stat.recvs, 2459 xprt->stat.bad_xids, 2460 xprt->stat.req_u, 2461 xprt->stat.bklog_u, 2462 xprt->stat.max_slots, 2463 xprt->stat.sending_u, 2464 xprt->stat.pending_u); 2465 } 2466 2467 /* 2468 * Allocate a single page as a scratch buffer for the rpc code. The reason 2469 * we allocate a page instead of doing a kmalloc like rpc_malloc is because 2470 * we want to use the server side send routines.
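 *
 * Resulting layout of the single page set up below (a sketch):
 *
 *	page_address(page)         rqst->rq_buffer      rqst->rq_rbuffer
 *	|                          |                    |
 *	v                          v                    v
 *	+--------------------------+--------------------+---------------+
 *	| struct rpc_buffer header | call buffer        | reply buffer  |
 *	| (len = PAGE_SIZE, data[])| (rq_callsize)      | (rest of page)|
 *	+--------------------------+--------------------+---------------+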
2471 */ 2472 static int bc_malloc(struct rpc_task *task) 2473 { 2474 struct rpc_rqst *rqst = task->tk_rqstp; 2475 size_t size = rqst->rq_callsize; 2476 struct page *page; 2477 struct rpc_buffer *buf; 2478 2479 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { 2480 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n", 2481 size); 2482 return -EINVAL; 2483 } 2484 2485 page = alloc_page(GFP_KERNEL); 2486 if (!page) 2487 return -ENOMEM; 2488 2489 buf = page_address(page); 2490 buf->len = PAGE_SIZE; 2491 2492 rqst->rq_buffer = buf->data; 2493 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; 2494 return 0; 2495 } 2496 2497 /* 2498 * Free the space allocated in the bc_alloc routine 2499 */ 2500 static void bc_free(struct rpc_task *task) 2501 { 2502 void *buffer = task->tk_rqstp->rq_buffer; 2503 struct rpc_buffer *buf; 2504 2505 buf = container_of(buffer, struct rpc_buffer, data); 2506 free_page((unsigned long)buf); 2507 } 2508 2509 static int bc_sendto(struct rpc_rqst *req) 2510 { 2511 struct xdr_buf *xdr = &req->rq_snd_buf; 2512 struct sock_xprt *transport = 2513 container_of(req->rq_xprt, struct sock_xprt, xprt); 2514 struct msghdr msg = { 2515 .msg_flags = 0, 2516 }; 2517 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | 2518 (u32)xdr->len); 2519 unsigned int sent = 0; 2520 int err; 2521 2522 req->rq_xtime = ktime_get(); 2523 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent); 2524 xdr_free_bvec(xdr); 2525 if (err < 0 || sent != (xdr->len + sizeof(marker))) 2526 return -EAGAIN; 2527 return sent; 2528 } 2529 2530 /** 2531 * bc_send_request - Send a backchannel Call on a TCP socket 2532 * @req: rpc_rqst containing Call message to be sent 2533 * 2534 * xpt_mutex ensures @rqstp's whole message is written to the socket 2535 * without interruption. 2536 * 2537 * Return values: 2538 * %0 if the message was sent successfully 2539 * %ENOTCONN if the message was not sent 2540 */ 2541 static int bc_send_request(struct rpc_rqst *req) 2542 { 2543 struct svc_xprt *xprt; 2544 int len; 2545 2546 /* 2547 * Get the server socket associated with this callback xprt 2548 */ 2549 xprt = req->rq_xprt->bc_xprt; 2550 2551 /* 2552 * Grab the mutex to serialize data as the connection is shared 2553 * with the fore channel 2554 */ 2555 mutex_lock(&xprt->xpt_mutex); 2556 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 2557 len = -ENOTCONN; 2558 else 2559 len = bc_sendto(req); 2560 mutex_unlock(&xprt->xpt_mutex); 2561 2562 if (len > 0) 2563 len = 0; 2564 2565 return len; 2566 } 2567 2568 /* 2569 * The close routine. Since this is client initiated, we do nothing 2570 */ 2571 2572 static void bc_close(struct rpc_xprt *xprt) 2573 { 2574 xprt_disconnect_done(xprt); 2575 } 2576 2577 /* 2578 * The xprt destroy routine. 
Again, because this connection is client 2579 * initiated, we do nothing 2580 */ 2581 2582 static void bc_destroy(struct rpc_xprt *xprt) 2583 { 2584 dprintk("RPC: bc_destroy xprt %p\n", xprt); 2585 2586 xs_xprt_free(xprt); 2587 module_put(THIS_MODULE); 2588 } 2589 2590 static const struct rpc_xprt_ops xs_local_ops = { 2591 .reserve_xprt = xprt_reserve_xprt, 2592 .release_xprt = xprt_release_xprt, 2593 .alloc_slot = xprt_alloc_slot, 2594 .free_slot = xprt_free_slot, 2595 .rpcbind = xs_local_rpcbind, 2596 .set_port = xs_local_set_port, 2597 .connect = xs_local_connect, 2598 .buf_alloc = rpc_malloc, 2599 .buf_free = rpc_free, 2600 .prepare_request = xs_stream_prepare_request, 2601 .send_request = xs_local_send_request, 2602 .wait_for_reply_request = xprt_wait_for_reply_request_def, 2603 .close = xs_close, 2604 .destroy = xs_destroy, 2605 .print_stats = xs_local_print_stats, 2606 .enable_swap = xs_enable_swap, 2607 .disable_swap = xs_disable_swap, 2608 }; 2609 2610 static const struct rpc_xprt_ops xs_udp_ops = { 2611 .set_buffer_size = xs_udp_set_buffer_size, 2612 .reserve_xprt = xprt_reserve_xprt_cong, 2613 .release_xprt = xprt_release_xprt_cong, 2614 .alloc_slot = xprt_alloc_slot, 2615 .free_slot = xprt_free_slot, 2616 .rpcbind = rpcb_getport_async, 2617 .set_port = xs_set_port, 2618 .connect = xs_connect, 2619 .buf_alloc = rpc_malloc, 2620 .buf_free = rpc_free, 2621 .send_request = xs_udp_send_request, 2622 .wait_for_reply_request = xprt_wait_for_reply_request_rtt, 2623 .timer = xs_udp_timer, 2624 .release_request = xprt_release_rqst_cong, 2625 .close = xs_close, 2626 .destroy = xs_destroy, 2627 .print_stats = xs_udp_print_stats, 2628 .enable_swap = xs_enable_swap, 2629 .disable_swap = xs_disable_swap, 2630 .inject_disconnect = xs_inject_disconnect, 2631 }; 2632 2633 static const struct rpc_xprt_ops xs_tcp_ops = { 2634 .reserve_xprt = xprt_reserve_xprt, 2635 .release_xprt = xprt_release_xprt, 2636 .alloc_slot = xprt_alloc_slot, 2637 .free_slot = xprt_free_slot, 2638 .rpcbind = rpcb_getport_async, 2639 .set_port = xs_set_port, 2640 .connect = xs_connect, 2641 .buf_alloc = rpc_malloc, 2642 .buf_free = rpc_free, 2643 .prepare_request = xs_stream_prepare_request, 2644 .send_request = xs_tcp_send_request, 2645 .wait_for_reply_request = xprt_wait_for_reply_request_def, 2646 .close = xs_tcp_shutdown, 2647 .destroy = xs_destroy, 2648 .set_connect_timeout = xs_tcp_set_connect_timeout, 2649 .print_stats = xs_tcp_print_stats, 2650 .enable_swap = xs_enable_swap, 2651 .disable_swap = xs_disable_swap, 2652 .inject_disconnect = xs_inject_disconnect, 2653 #ifdef CONFIG_SUNRPC_BACKCHANNEL 2654 .bc_setup = xprt_setup_bc, 2655 .bc_maxpayload = xs_tcp_bc_maxpayload, 2656 .bc_num_slots = xprt_bc_max_slots, 2657 .bc_free_rqst = xprt_free_bc_rqst, 2658 .bc_destroy = xprt_destroy_bc, 2659 #endif 2660 }; 2661 2662 /* 2663 * The rpc_xprt_ops for the server backchannel 2664 */ 2665 2666 static const struct rpc_xprt_ops bc_tcp_ops = { 2667 .reserve_xprt = xprt_reserve_xprt, 2668 .release_xprt = xprt_release_xprt, 2669 .alloc_slot = xprt_alloc_slot, 2670 .free_slot = xprt_free_slot, 2671 .buf_alloc = bc_malloc, 2672 .buf_free = bc_free, 2673 .send_request = bc_send_request, 2674 .wait_for_reply_request = xprt_wait_for_reply_request_def, 2675 .close = bc_close, 2676 .destroy = bc_destroy, 2677 .print_stats = xs_tcp_print_stats, 2678 .enable_swap = xs_enable_swap, 2679 .disable_swap = xs_disable_swap, 2680 .inject_disconnect = xs_inject_disconnect, 2681 }; 2682 2683 static int xs_init_anyaddr(const int family, struct 
sockaddr *sap) 2684 { 2685 static const struct sockaddr_in sin = { 2686 .sin_family = AF_INET, 2687 .sin_addr.s_addr = htonl(INADDR_ANY), 2688 }; 2689 static const struct sockaddr_in6 sin6 = { 2690 .sin6_family = AF_INET6, 2691 .sin6_addr = IN6ADDR_ANY_INIT, 2692 }; 2693 2694 switch (family) { 2695 case AF_LOCAL: 2696 break; 2697 case AF_INET: 2698 memcpy(sap, &sin, sizeof(sin)); 2699 break; 2700 case AF_INET6: 2701 memcpy(sap, &sin6, sizeof(sin6)); 2702 break; 2703 default: 2704 dprintk("RPC: %s: Bad address family\n", __func__); 2705 return -EAFNOSUPPORT; 2706 } 2707 return 0; 2708 } 2709 2710 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, 2711 unsigned int slot_table_size, 2712 unsigned int max_slot_table_size) 2713 { 2714 struct rpc_xprt *xprt; 2715 struct sock_xprt *new; 2716 2717 if (args->addrlen > sizeof(xprt->addr)) { 2718 dprintk("RPC: xs_setup_xprt: address too large\n"); 2719 return ERR_PTR(-EBADF); 2720 } 2721 2722 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, 2723 max_slot_table_size); 2724 if (xprt == NULL) { 2725 dprintk("RPC: xs_setup_xprt: couldn't allocate " 2726 "rpc_xprt\n"); 2727 return ERR_PTR(-ENOMEM); 2728 } 2729 2730 new = container_of(xprt, struct sock_xprt, xprt); 2731 mutex_init(&new->recv_mutex); 2732 memcpy(&xprt->addr, args->dstaddr, args->addrlen); 2733 xprt->addrlen = args->addrlen; 2734 if (args->srcaddr) 2735 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); 2736 else { 2737 int err; 2738 err = xs_init_anyaddr(args->dstaddr->sa_family, 2739 (struct sockaddr *)&new->srcaddr); 2740 if (err != 0) { 2741 xprt_free(xprt); 2742 return ERR_PTR(err); 2743 } 2744 } 2745 2746 return xprt; 2747 } 2748 2749 static const struct rpc_timeout xs_local_default_timeout = { 2750 .to_initval = 10 * HZ, 2751 .to_maxval = 10 * HZ, 2752 .to_retries = 2, 2753 }; 2754 2755 /** 2756 * xs_setup_local - Set up transport to use an AF_LOCAL socket 2757 * @args: rpc transport creation arguments 2758 * 2759 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP 2760 */ 2761 static struct rpc_xprt *xs_setup_local(struct xprt_create *args) 2762 { 2763 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; 2764 struct sock_xprt *transport; 2765 struct rpc_xprt *xprt; 2766 struct rpc_xprt *ret; 2767 2768 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2769 xprt_max_tcp_slot_table_entries); 2770 if (IS_ERR(xprt)) 2771 return xprt; 2772 transport = container_of(xprt, struct sock_xprt, xprt); 2773 2774 xprt->prot = 0; 2775 xprt->xprt_class = &xs_local_transport; 2776 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2777 2778 xprt->bind_timeout = XS_BIND_TO; 2779 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2780 xprt->idle_timeout = XS_IDLE_DISC_TO; 2781 2782 xprt->ops = &xs_local_ops; 2783 xprt->timeout = &xs_local_default_timeout; 2784 2785 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 2786 INIT_WORK(&transport->error_worker, xs_error_handle); 2787 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket); 2788 2789 switch (sun->sun_family) { 2790 case AF_LOCAL: 2791 if (sun->sun_path[0] != '/') { 2792 dprintk("RPC: bad AF_LOCAL address: %s\n", 2793 sun->sun_path); 2794 ret = ERR_PTR(-EINVAL); 2795 goto out_err; 2796 } 2797 xprt_set_bound(xprt); 2798 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); 2799 ret = ERR_PTR(xs_local_setup_socket(transport)); 2800 if (ret) 2801 goto out_err; 2802 break; 2803 default: 2804 ret = ERR_PTR(-EAFNOSUPPORT); 2805 goto out_err; 2806 } 2807 2808 dprintk("RPC: set 
up xprt to %s via AF_LOCAL\n", 2809 xprt->address_strings[RPC_DISPLAY_ADDR]); 2810 2811 if (try_module_get(THIS_MODULE)) 2812 return xprt; 2813 ret = ERR_PTR(-EINVAL); 2814 out_err: 2815 xs_xprt_free(xprt); 2816 return ret; 2817 } 2818 2819 static const struct rpc_timeout xs_udp_default_timeout = { 2820 .to_initval = 5 * HZ, 2821 .to_maxval = 30 * HZ, 2822 .to_increment = 5 * HZ, 2823 .to_retries = 5, 2824 }; 2825 2826 /** 2827 * xs_setup_udp - Set up transport to use a UDP socket 2828 * @args: rpc transport creation arguments 2829 * 2830 */ 2831 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) 2832 { 2833 struct sockaddr *addr = args->dstaddr; 2834 struct rpc_xprt *xprt; 2835 struct sock_xprt *transport; 2836 struct rpc_xprt *ret; 2837 2838 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, 2839 xprt_udp_slot_table_entries); 2840 if (IS_ERR(xprt)) 2841 return xprt; 2842 transport = container_of(xprt, struct sock_xprt, xprt); 2843 2844 xprt->prot = IPPROTO_UDP; 2845 xprt->xprt_class = &xs_udp_transport; 2846 /* XXX: header size can vary due to auth type, IPv6, etc. */ 2847 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 2848 2849 xprt->bind_timeout = XS_BIND_TO; 2850 xprt->reestablish_timeout = XS_UDP_REEST_TO; 2851 xprt->idle_timeout = XS_IDLE_DISC_TO; 2852 2853 xprt->ops = &xs_udp_ops; 2854 2855 xprt->timeout = &xs_udp_default_timeout; 2856 2857 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); 2858 INIT_WORK(&transport->error_worker, xs_error_handle); 2859 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); 2860 2861 switch (addr->sa_family) { 2862 case AF_INET: 2863 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 2864 xprt_set_bound(xprt); 2865 2866 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); 2867 break; 2868 case AF_INET6: 2869 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2870 xprt_set_bound(xprt); 2871 2872 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 2873 break; 2874 default: 2875 ret = ERR_PTR(-EAFNOSUPPORT); 2876 goto out_err; 2877 } 2878 2879 if (xprt_bound(xprt)) 2880 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 2881 xprt->address_strings[RPC_DISPLAY_ADDR], 2882 xprt->address_strings[RPC_DISPLAY_PORT], 2883 xprt->address_strings[RPC_DISPLAY_PROTO]); 2884 else 2885 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 2886 xprt->address_strings[RPC_DISPLAY_ADDR], 2887 xprt->address_strings[RPC_DISPLAY_PROTO]); 2888 2889 if (try_module_get(THIS_MODULE)) 2890 return xprt; 2891 ret = ERR_PTR(-EINVAL); 2892 out_err: 2893 xs_xprt_free(xprt); 2894 return ret; 2895 } 2896 2897 static const struct rpc_timeout xs_tcp_default_timeout = { 2898 .to_initval = 60 * HZ, 2899 .to_maxval = 60 * HZ, 2900 .to_retries = 2, 2901 }; 2902 2903 /** 2904 * xs_setup_tcp - Set up transport to use a TCP socket 2905 * @args: rpc transport creation arguments 2906 * 2907 */ 2908 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) 2909 { 2910 struct sockaddr *addr = args->dstaddr; 2911 struct rpc_xprt *xprt; 2912 struct sock_xprt *transport; 2913 struct rpc_xprt *ret; 2914 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 2915 2916 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 2917 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 2918 2919 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2920 max_slot_table_size); 2921 if (IS_ERR(xprt)) 2922 return xprt; 2923 transport = container_of(xprt, struct sock_xprt, xprt); 2924 2925 xprt->prot = IPPROTO_TCP; 2926 
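/*
 * With xs_tcp_default_timeout above (to_initval = 60 * HZ,
 * to_retries = 2), the derived values assigned a few lines below
 * work out to roughly:
 *
 *	max_reconnect_timeout = to_maxval                 = 60s
 *	connect_timeout = to_initval * (to_retries + 1)   = 180s
 */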
xprt->xprt_class = &xs_tcp_transport; 2927 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2928 2929 xprt->bind_timeout = XS_BIND_TO; 2930 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2931 xprt->idle_timeout = XS_IDLE_DISC_TO; 2932 2933 xprt->ops = &xs_tcp_ops; 2934 xprt->timeout = &xs_tcp_default_timeout; 2935 2936 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 2937 xprt->connect_timeout = xprt->timeout->to_initval * 2938 (xprt->timeout->to_retries + 1); 2939 2940 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 2941 INIT_WORK(&transport->error_worker, xs_error_handle); 2942 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 2943 2944 switch (addr->sa_family) { 2945 case AF_INET: 2946 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 2947 xprt_set_bound(xprt); 2948 2949 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 2950 break; 2951 case AF_INET6: 2952 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2953 xprt_set_bound(xprt); 2954 2955 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 2956 break; 2957 default: 2958 ret = ERR_PTR(-EAFNOSUPPORT); 2959 goto out_err; 2960 } 2961 2962 if (xprt_bound(xprt)) 2963 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 2964 xprt->address_strings[RPC_DISPLAY_ADDR], 2965 xprt->address_strings[RPC_DISPLAY_PORT], 2966 xprt->address_strings[RPC_DISPLAY_PROTO]); 2967 else 2968 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 2969 xprt->address_strings[RPC_DISPLAY_ADDR], 2970 xprt->address_strings[RPC_DISPLAY_PROTO]); 2971 2972 if (try_module_get(THIS_MODULE)) 2973 return xprt; 2974 ret = ERR_PTR(-EINVAL); 2975 out_err: 2976 xs_xprt_free(xprt); 2977 return ret; 2978 } 2979 2980 /** 2981 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket 2982 * @args: rpc transport creation arguments 2983 * 2984 */ 2985 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) 2986 { 2987 struct sockaddr *addr = args->dstaddr; 2988 struct rpc_xprt *xprt; 2989 struct sock_xprt *transport; 2990 struct svc_sock *bc_sock; 2991 struct rpc_xprt *ret; 2992 2993 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2994 xprt_tcp_slot_table_entries); 2995 if (IS_ERR(xprt)) 2996 return xprt; 2997 transport = container_of(xprt, struct sock_xprt, xprt); 2998 2999 xprt->prot = IPPROTO_TCP; 3000 xprt->xprt_class = &xs_bc_tcp_transport; 3001 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3002 xprt->timeout = &xs_tcp_default_timeout; 3003 3004 /* backchannel */ 3005 xprt_set_bound(xprt); 3006 xprt->bind_timeout = 0; 3007 xprt->reestablish_timeout = 0; 3008 xprt->idle_timeout = 0; 3009 3010 xprt->ops = &bc_tcp_ops; 3011 3012 switch (addr->sa_family) { 3013 case AF_INET: 3014 xs_format_peer_addresses(xprt, "tcp", 3015 RPCBIND_NETID_TCP); 3016 break; 3017 case AF_INET6: 3018 xs_format_peer_addresses(xprt, "tcp", 3019 RPCBIND_NETID_TCP6); 3020 break; 3021 default: 3022 ret = ERR_PTR(-EAFNOSUPPORT); 3023 goto out_err; 3024 } 3025 3026 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3027 xprt->address_strings[RPC_DISPLAY_ADDR], 3028 xprt->address_strings[RPC_DISPLAY_PORT], 3029 xprt->address_strings[RPC_DISPLAY_PROTO]); 3030 3031 /* 3032 * Once we've associated a backchannel xprt with a connection, 3033 * we want to keep it around as long as the connection lasts, 3034 * in case we need to start using it for a backchannel again; 3035 * this reference won't be dropped until bc_xprt is destroyed. 
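 *
 * Resulting linkage after the assignments just below (a sketch):
 *
 *	args->bc_xprt->xpt_bc_xprt --> xprt        (pinned by xprt_get())
 *	xprt->bc_xprt              --> args->bc_xprt
 *	transport->sock/inet       --> the svc_sock's own socket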
3036 */ 3037 xprt_get(xprt); 3038 args->bc_xprt->xpt_bc_xprt = xprt; 3039 xprt->bc_xprt = args->bc_xprt; 3040 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); 3041 transport->sock = bc_sock->sk_sock; 3042 transport->inet = bc_sock->sk_sk; 3043 3044 /* 3045 * Since we don't want connections for the backchannel, we set 3046 * the xprt status to connected 3047 */ 3048 xprt_set_connected(xprt); 3049 3050 if (try_module_get(THIS_MODULE)) 3051 return xprt; 3052 3053 args->bc_xprt->xpt_bc_xprt = NULL; 3054 args->bc_xprt->xpt_bc_xps = NULL; 3055 xprt_put(xprt); 3056 ret = ERR_PTR(-EINVAL); 3057 out_err: 3058 xs_xprt_free(xprt); 3059 return ret; 3060 } 3061 3062 static struct xprt_class xs_local_transport = { 3063 .list = LIST_HEAD_INIT(xs_local_transport.list), 3064 .name = "named UNIX socket", 3065 .owner = THIS_MODULE, 3066 .ident = XPRT_TRANSPORT_LOCAL, 3067 .setup = xs_setup_local, 3068 .netid = { "" }, 3069 }; 3070 3071 static struct xprt_class xs_udp_transport = { 3072 .list = LIST_HEAD_INIT(xs_udp_transport.list), 3073 .name = "udp", 3074 .owner = THIS_MODULE, 3075 .ident = XPRT_TRANSPORT_UDP, 3076 .setup = xs_setup_udp, 3077 .netid = { "udp", "udp6", "" }, 3078 }; 3079 3080 static struct xprt_class xs_tcp_transport = { 3081 .list = LIST_HEAD_INIT(xs_tcp_transport.list), 3082 .name = "tcp", 3083 .owner = THIS_MODULE, 3084 .ident = XPRT_TRANSPORT_TCP, 3085 .setup = xs_setup_tcp, 3086 .netid = { "tcp", "tcp6", "" }, 3087 }; 3088 3089 static struct xprt_class xs_bc_tcp_transport = { 3090 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), 3091 .name = "tcp NFSv4.1 backchannel", 3092 .owner = THIS_MODULE, 3093 .ident = XPRT_TRANSPORT_BC_TCP, 3094 .setup = xs_setup_bc_tcp, 3095 .netid = { "" }, 3096 }; 3097 3098 /** 3099 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client 3100 * 3101 */ 3102 int init_socket_xprt(void) 3103 { 3104 if (!sunrpc_table_header) 3105 sunrpc_table_header = register_sysctl_table(sunrpc_table); 3106 3107 xprt_register_transport(&xs_local_transport); 3108 xprt_register_transport(&xs_udp_transport); 3109 xprt_register_transport(&xs_tcp_transport); 3110 xprt_register_transport(&xs_bc_tcp_transport); 3111 3112 return 0; 3113 } 3114 3115 /** 3116 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister 3117 * 3118 */ 3119 void cleanup_socket_xprt(void) 3120 { 3121 if (sunrpc_table_header) { 3122 unregister_sysctl_table(sunrpc_table_header); 3123 sunrpc_table_header = NULL; 3124 } 3125 3126 xprt_unregister_transport(&xs_local_transport); 3127 xprt_unregister_transport(&xs_udp_transport); 3128 xprt_unregister_transport(&xs_tcp_transport); 3129 xprt_unregister_transport(&xs_bc_tcp_transport); 3130 } 3131 3132 static int param_set_portnr(const char *val, const struct kernel_param *kp) 3133 { 3134 return param_set_uint_minmax(val, kp, 3135 RPC_MIN_RESVPORT, 3136 RPC_MAX_RESVPORT); 3137 } 3138 3139 static const struct kernel_param_ops param_ops_portnr = { 3140 .set = param_set_portnr, 3141 .get = param_get_uint, 3142 }; 3143 3144 #define param_check_portnr(name, p) \ 3145 __param_check(name, p, unsigned int); 3146 3147 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); 3148 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644); 3149 3150 static int param_set_slot_table_size(const char *val, 3151 const struct kernel_param *kp) 3152 { 3153 return param_set_uint_minmax(val, kp, 3154 RPC_MIN_SLOT_TABLE, 3155 RPC_MAX_SLOT_TABLE); 3156 } 3157 3158 static const struct kernel_param_ops param_ops_slot_table_size = { 3159 
.set = param_set_slot_table_size, 3160 .get = param_get_uint, 3161 }; 3162 3163 #define param_check_slot_table_size(name, p) \ 3164 __param_check(name, p, unsigned int); 3165 3166 static int param_set_max_slot_table_size(const char *val, 3167 const struct kernel_param *kp) 3168 { 3169 return param_set_uint_minmax(val, kp, 3170 RPC_MIN_SLOT_TABLE, 3171 RPC_MAX_SLOT_TABLE_LIMIT); 3172 } 3173 3174 static const struct kernel_param_ops param_ops_max_slot_table_size = { 3175 .set = param_set_max_slot_table_size, 3176 .get = param_get_uint, 3177 }; 3178 3179 #define param_check_max_slot_table_size(name, p) \ 3180 __param_check(name, p, unsigned int); 3181 3182 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, 3183 slot_table_size, 0644); 3184 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, 3185 max_slot_table_size, 0644); 3186 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 3187 slot_table_size, 0644); 3188
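/*
 * Example (hypothetical values): the parameters declared above belong to
 * the sunrpc module and sit alongside the /proc/sys/sunrpc sysctls set up
 * in init_socket_xprt(), so an administrator could tune them with e.g.:
 *
 *	modprobe sunrpc tcp_slot_table_entries=16 max_resvport=1023
 *	echo 128 > /proc/sys/sunrpc/tcp_max_slot_table_entries
 */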