// SPDX-License-Identifier: GPL-2.0
/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/tls_prot.h>
#include <net/handshake.h>

#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>

#include <trace/events/sock.h>
#include <trace/events/sunrpc.h>

#include "socklib.h"
#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);
static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
		struct socket *sock);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl() again. The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

static struct xprt_class xs_local_transport;
static struct xprt_class xs_udp_transport;
static struct xprt_class xs_tcp_transport;
static struct xprt_class xs_tcp_tls_transport;
static struct xprt_class xs_bc_tcp_transport;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static struct ctl_table xs_tunables_table[] = {
	{
		.procname = "udp_slot_table_entries",
		.data = &xprt_udp_slot_table_entries,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_slot_table_size,
		.extra2 = &max_slot_table_size
	},
	{
		.procname = "tcp_slot_table_entries",
		.data = &xprt_tcp_slot_table_entries,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_slot_table_size,
		.extra2 = &max_slot_table_size
	},
	{
		.procname = "tcp_max_slot_table_entries",
		.data = &xprt_max_tcp_slot_table_entries,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_slot_table_size,
		.extra2 = &max_tcp_slot_table_limit
	},
	{
		.procname = "min_resvport",
		.data = &xprt_min_resvport,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &xprt_min_resvport_limit,
		.extra2 = &xprt_max_resvport_limit
	},
	{
		.procname = "max_resvport",
		.data = &xprt_max_resvport,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &xprt_min_resvport_limit,
		.extra2 = &xprt_max_resvport_limit
	},
	{
		.procname = "tcp_fin_timeout",
		.data = &xs_tcp_fin_timeout,
		.maxlen = sizeof(xs_tcp_fin_timeout),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{ },
};

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs. This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server. Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long. Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

/*
 * TLS handshake timeout.
 */
#define XS_TLS_HANDSHAKE_TO	(10U * HZ)

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
	return (struct sockaddr_un *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	struct sockaddr_un *sun;
	char buf[128];

	switch (sap->sa_family) {
	case AF_LOCAL:
		sun = xs_addr_un(xprt);
		if (sun->sun_path[0]) {
			strscpy(buf, sun->sun_path, sizeof(buf));
		} else {
			buf[0] = '@';
			strscpy(buf+1, sun->sun_path+1, sizeof(buf)-1);
		}
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		break;
	case AF_INET:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin = xs_addr_in(xprt);
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin6 = xs_addr_in6(xprt);
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}

	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}

static void
xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

static size_t
xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
{
	size_t i, n;

	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
		return want;
	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < n; i++) {
		if (buf->pages[i])
			continue;
		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
		if (!buf->pages[i]) {
			i *= PAGE_SIZE;
			return i > buf->page_base ? i - buf->page_base : 0;
		}
	}
	return want;
}

static int
xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
		     struct cmsghdr *cmsg, int ret)
{
	u8 content_type = tls_get_record_type(sock->sk, cmsg);
	u8 level, description;

	switch (content_type) {
	case 0:
		break;
	case TLS_RECORD_TYPE_DATA:
		/* TLS sets EOR at the end of each application data
		 * record, even though there might be more frames
		 * waiting to be decrypted.
		 */
		msg->msg_flags &= ~MSG_EOR;
		break;
	case TLS_RECORD_TYPE_ALERT:
		tls_alert_recv(sock->sk, msg, &level, &description);
		ret = (level == TLS_ALERT_LEVEL_FATAL) ?
			-EACCES : -EAGAIN;
		break;
	default:
		/* discard this record type */
		ret = -EAGAIN;
	}
	return ret;
}

static int
xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
{
	union {
		struct cmsghdr cmsg;
		u8 buf[CMSG_SPACE(sizeof(u8))];
	} u;
	int ret;

	msg->msg_control = &u;
	msg->msg_controllen = sizeof(u);
	ret = sock_recvmsg(sock, msg, flags);
	if (msg->msg_controllen != sizeof(u))
		ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
	return ret;
}

static ssize_t
xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
{
	ssize_t ret;
	if (seek != 0)
		iov_iter_advance(&msg->msg_iter, seek);
	ret = xs_sock_recv_cmsg(sock, msg, flags);
	return ret > 0 ?
		ret + seek : ret;
}

static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
		struct kvec *kvec, size_t count, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
		struct bio_vec *bvec, unsigned long nr, size_t count,
		size_t seek)
{
	iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
		size_t count)
{
	iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
	return xs_sock_recv_cmsg(sock, msg, flags);
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
	struct bvec_iter bi = {
		.bi_size = count,
	};
	struct bio_vec bv;

	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
#else
static inline void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
}
#endif

static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
{
	size_t want, seek_init = seek, offset = 0;
	ssize_t ret;

	want = min_t(size_t, count, buf->head[0].iov_len);
	if (seek < want) {
		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
		if (ret <= 0)
			goto sock_err;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
		seek = 0;
	} else {
		seek -= want;
		offset += want;
	}

	want = xs_alloc_sparse_pages(
		buf, min_t(size_t, count - offset, buf->page_len),
		GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	if (seek < want) {
		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
				xdr_buf_pagecount(buf),
				want + buf->page_base,
				seek + buf->page_base);
		if (ret <= 0)
			goto sock_err;
		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
		ret -= buf->page_base;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
		seek = 0;
	} else {
		seek -= want;
		offset += want;
	}

	want = min_t(size_t, count - offset, buf->tail[0].iov_len);
	if (seek < want) {
		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
		if (ret <= 0)
			goto sock_err;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
	} else if (offset < seek_init)
		offset = seek_init;
	ret = -EMSGSIZE;
out:
	*read = offset - seek_init;
	return ret;
sock_err:
	offset += seek;
	goto out;
}

static void
xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
{
	if (!transport->recv.copied) {
		if (buf->head[0].iov_len >= transport->recv.offset)
			memcpy(buf->head[0].iov_base,
			       &transport->recv.xid,
			       transport->recv.offset);
		transport->recv.copied = transport->recv.offset;
	}
}

static bool
xs_read_stream_request_done(struct sock_xprt *transport)
{
	return transport->recv.fraghdr &
		cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
}

static void
xs_read_stream_check_eor(struct sock_xprt *transport,
		struct msghdr *msg)
{
	if (xs_read_stream_request_done(transport))
		msg->msg_flags |= MSG_EOR;
}

static ssize_t
xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
		int flags, struct rpc_rqst *req)
{
	struct xdr_buf *buf = &req->rq_private_buf;
	size_t want, read;
	ssize_t ret;

	xs_read_header(transport, buf);

	want = transport->recv.len - transport->recv.offset;
	if (want != 0) {
		ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
				transport->recv.copied + want,
				transport->recv.copied,
				&read);
		transport->recv.offset += read;
		transport->recv.copied += read;
	}

	if (transport->recv.offset == transport->recv.len)
		xs_read_stream_check_eor(transport, msg);

	if (want == 0)
		return 0;

	switch (ret) {
	default:
		break;
	case -EFAULT:
	case -EMSGSIZE:
		msg->msg_flags |= MSG_TRUNC;
		return read;
	case 0:
		return -ESHUTDOWN;
	}
	return ret < 0 ? ret : read;
}

static size_t
xs_read_stream_headersize(bool isfrag)
{
	if (isfrag)
		return sizeof(__be32);
	return 3 * sizeof(__be32);
}

static ssize_t
xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
		int flags, size_t want, size_t seek)
{
	struct kvec kvec = {
		.iov_base = &transport->recv.fraghdr,
		.iov_len = want,
	};
	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct rpc_rqst *req;
	ssize_t ret;

	/* Is this transport associated with the backchannel?
	 */
	if (!xprt->bc_serv)
		return -ESHUTDOWN;

	/* Look up and lock the request corresponding to the given XID */
	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
	if (!req) {
		printk(KERN_WARNING "Callback slot table overflowed\n");
		return -ESHUTDOWN;
	}
	if (transport->recv.copied && !req->rq_private_buf.len)
		return -ESHUTDOWN;

	ret = xs_read_stream_request(transport, msg, flags, req);
	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
		xprt_complete_bc_request(req, transport->recv.copied);
	else
		req->rq_private_buf.len = transport->recv.copied;

	return ret;
}
#else /* CONFIG_SUNRPC_BACKCHANNEL */
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	return -ESHUTDOWN;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

static ssize_t
xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct rpc_rqst *req;
	ssize_t ret = 0;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, transport->recv.xid);
	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
		msg->msg_flags |= MSG_TRUNC;
		goto out;
	}
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);

	ret = xs_read_stream_request(transport, msg, flags, req);

	spin_lock(&xprt->queue_lock);
	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
		xprt_complete_rqst(req->rq_task, transport->recv.copied);
	else
		req->rq_private_buf.len = transport->recv.copied;
	xprt_unpin_rqst(req);
out:
	spin_unlock(&xprt->queue_lock);
	return ret;
}

static ssize_t
xs_read_stream(struct sock_xprt *transport, int flags)
{
	struct msghdr msg = { 0 };
	size_t want, read = 0;
	ssize_t ret = 0;

	if (transport->recv.len == 0) {
		want = xs_read_stream_headersize(transport->recv.copied != 0);
		ret = xs_read_stream_header(transport, &msg, flags, want,
				transport->recv.offset);
		if (ret <= 0)
			goto out_err;
		transport->recv.offset = ret;
		if (transport->recv.offset != want)
			return transport->recv.offset;
		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
			RPC_FRAGMENT_SIZE_MASK;
		transport->recv.offset -= sizeof(transport->recv.fraghdr);
		read = ret;
	}

	switch (be32_to_cpu(transport->recv.calldir)) {
	default:
		msg.msg_flags |= MSG_TRUNC;
		break;
	case RPC_CALL:
		ret = xs_read_stream_call(transport, &msg, flags);
		break;
	case RPC_REPLY:
		ret = xs_read_stream_reply(transport, &msg, flags);
	}
	if (msg.msg_flags & MSG_TRUNC) {
		transport->recv.calldir = cpu_to_be32(-1);
		transport->recv.copied = -1;
	}
	if (ret < 0)
		goto out_err;
	read += ret;
	if (transport->recv.offset < transport->recv.len) {
		if (!(msg.msg_flags & MSG_TRUNC))
			return read;
		msg.msg_flags = 0;
		ret = xs_read_discard(transport->sock, &msg, flags,
				transport->recv.len - transport->recv.offset);
		if (ret <= 0)
			goto out_err;
		transport->recv.offset += ret;
		read += ret;
		if (transport->recv.offset != transport->recv.len)
			return read;
	}
	if (xs_read_stream_request_done(transport)) {
		trace_xs_stream_read_request(transport);
		transport->recv.copied = 0;
	}
	transport->recv.offset = 0;
	transport->recv.len = 0;
	return read;
out_err:
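	/* Descriptive note (added): a zero-byte read on a stream socket
	 * indicates the peer closed the connection, so it is reported as
	 * a shutdown rather than as success.
	 */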
	return ret != 0 ? ret : -ESHUTDOWN;
}

static __poll_t xs_poll_socket(struct sock_xprt *transport)
{
	return transport->sock->ops->poll(transport->file, transport->sock,
			NULL);
}

static bool xs_poll_socket_readable(struct sock_xprt *transport)
{
	__poll_t events = xs_poll_socket(transport);

	return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
}

static void xs_poll_check_readable(struct sock_xprt *transport)
{

	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
	if (test_bit(XPRT_SOCK_IGNORE_RECV, &transport->sock_state))
		return;
	if (!xs_poll_socket_readable(transport))
		return;
	if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
		queue_work(xprtiod_workqueue, &transport->recv_worker);
}

static void xs_stream_data_receive(struct sock_xprt *transport)
{
	size_t read = 0;
	ssize_t ret = 0;

	mutex_lock(&transport->recv_mutex);
	if (transport->sock == NULL)
		goto out;
	for (;;) {
		ret = xs_read_stream(transport, MSG_DONTWAIT);
		if (ret < 0)
			break;
		read += ret;
		cond_resched();
	}
	if (ret == -ESHUTDOWN)
		kernel_sock_shutdown(transport->sock, SHUT_RDWR);
	else if (ret == -EACCES)
		xprt_wake_pending_tasks(&transport->xprt, -EACCES);
	else
		xs_poll_check_readable(transport);
out:
	mutex_unlock(&transport->recv_mutex);
	trace_xs_stream_read_data(&transport->xprt, ret, read);
}

static void xs_stream_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	unsigned int pflags = memalloc_nofs_save();

	xs_stream_data_receive(transport);
	memalloc_nofs_restore(pflags);
}

static void
xs_stream_reset_connect(struct sock_xprt *transport)
{
	transport->recv.offset = 0;
	transport->recv.len = 0;
	transport->recv.copied = 0;
	transport->xmit.offset = 0;
}

static void
xs_stream_start_connect(struct sock_xprt *transport)
{
	transport->xprt.stat.connect_count++;
	transport->xprt.stat.connect_start = jiffies;
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

/**
 * xs_nospace - handle transmit was incomplete
 * @req: pointer to RPC request
 * @transport: pointer to struct sock_xprt
 *
 */
static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	trace_rpc_socket_nospace(req, transport);

	/* Protect against races with write_space */
	spin_lock(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		/* wait for more buffer space */
		set_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		xprt_wait_for_buffer_space(xprt);
	} else
		ret = -ENOTCONN;

	spin_unlock(&xprt->transport_lock);
	return ret;
}

static int xs_sock_nospace(struct rpc_rqst *req)
{
	struct sock_xprt *transport =
		container_of(req->rq_xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	lock_sock(sk);
	if (!sock_writeable(sk))
		ret = xs_nospace(req, transport);
	release_sock(sk);
	return ret;
}

static int xs_stream_nospace(struct rpc_rqst *req,
		bool vm_wait)
{
	struct sock_xprt *transport =
		container_of(req->rq_xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	if (vm_wait)
		return -ENOBUFS;
	lock_sock(sk);
	if (!sk_stream_memory_free(sk))
		ret = xs_nospace(req, transport);
	release_sock(sk);
	return ret;
}

static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf)
{
	return xdr_alloc_bvec(buf, rpc_task_gfp_mask());
}

/*
 * Determine if the previous message in the stream was aborted before it
 * could complete transmission.
 */
static bool
xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
{
	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
}

/*
 * Return the stream record marker field for a record of length < 2^31-1
 */
static rpc_fraghdr
xs_stream_record_marker(struct xdr_buf *xdr)
{
	if (!xdr->len)
		return 0;
	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
}

/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	rpc_fraghdr rm = xs_stream_record_marker(xdr);
	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
	struct msghdr msg = {
		.msg_flags = XS_SENDMSG_FLAGS,
	};
	bool vm_wait;
	unsigned int sent;
	int status;

	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
		xprt_force_disconnect(xprt);
		return -ENOTCONN;
	}

	xs_pktdump("packet data:",
			req->rq_svec->iov_base, req->rq_svec->iov_len);

	vm_wait = sk_stream_is_writeable(transport->inet) ?
		true : false;

	req->rq_xtime = ktime_get();
	status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
				   transport->xmit.offset, rm, &sent);
	dprintk("RPC: %s(%u) = %d\n",
		__func__, xdr->len - transport->xmit.offset, status);

	if (likely(sent > 0) || status == 0) {
		transport->xmit.offset += sent;
		req->rq_bytes_sent = transport->xmit.offset;
		if (likely(req->rq_bytes_sent >= msglen)) {
			req->rq_xmit_bytes_sent += transport->xmit.offset;
			transport->xmit.offset = 0;
			return 0;
		}
		status = -EAGAIN;
		vm_wait = false;
	}

	switch (status) {
	case -EAGAIN:
		status = xs_stream_nospace(req, vm_wait);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		fallthrough;
	case -EPIPE:
		xprt_force_disconnect(xprt);
		status = -ENOTCONN;
	}

	return status;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	struct msghdr msg = {
		.msg_name = xs_addr(xprt),
		.msg_namelen = xprt->addrlen,
		.msg_flags = XS_SENDMSG_FLAGS,
	};
	unsigned int sent;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, req))
		return -EBADSLT;

	status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
	if (status < 0)
		return status;
	req->rq_xtime = ktime_get();
	status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len, status);

	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
	if (status == -EPERM)
		goto process_status;

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (sent > 0 || status == 0) {
		req->rq_xmit_bytes_sent += sent;
		if (sent >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

process_status:
	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_sock_nospace(req);
		break;
	case -ENETUNREACH:
	case -ENOBUFS:
	case -EPIPE:
	case -ECONNREFUSED:
	case -EPERM:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED.
		 */
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	rpc_fraghdr rm = xs_stream_record_marker(xdr);
	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
	struct msghdr msg = {
		.msg_flags = XS_SENDMSG_FLAGS,
	};
	bool vm_wait;
	unsigned int sent;
	int status;

	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
		if (transport->sock != NULL)
			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
		return -ENOTCONN;
	}
	if (!transport->inet)
		return -ENOTCONN;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
		xs_tcp_set_socket_timeouts(xprt, transport->sock);

	xs_set_srcport(transport, transport->sock);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	req->rq_xtime = ktime_get();
	tcp_sock_set_cork(transport->inet, true);

	vm_wait = sk_stream_is_writeable(transport->inet) ? true : false;

	do {
		status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
					   transport->xmit.offset, rm, &sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - transport->xmit.offset, status);

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		transport->xmit.offset += sent;
		req->rq_bytes_sent = transport->xmit.offset;
		if (likely(req->rq_bytes_sent >= msglen)) {
			req->rq_xmit_bytes_sent += transport->xmit.offset;
			transport->xmit.offset = 0;
			if (atomic_long_read(&xprt->xmit_queuelen) == 1)
				tcp_sock_set_cork(transport->inet, false);
			return 0;
		}

		WARN_ON_ONCE(sent == 0 && status == 0);

		if (sent > 0)
			vm_wait = false;

	} while (status == 0);

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here?
		 */
		break;
	case -EAGAIN:
		status = xs_stream_nospace(req, vm_wait);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -EADDRINUSE:
	case -ENOBUFS:
	case -EPIPE:
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	transport->old_data_ready = sk->sk_data_ready;
	transport->old_state_change = sk->sk_state_change;
	transport->old_write_space = sk->sk_write_space;
	transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	sk->sk_error_report = transport->old_error_report;
}

static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
	clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
}

static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
{
	set_bit(nr, &transport->sock_state);
	queue_work(xprtiod_workqueue, &transport->error_worker);
}

static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
	xprt->connect_cookie++;
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	xs_sock_reset_state_flags(xprt);
	smp_mb__after_atomic();
}

/**
 * xs_error_report - callback to handle TCP socket state errors
 * @sk: socket
 *
 * Note: we don't call sock_error() since there may be a rpc_task
 * using the socket, and so we don't want to clear sk->sk_err.
 */
static void xs_error_report(struct sock *sk)
{
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;

	if (!(xprt = xprt_from_sock(sk)))
		return;

	transport = container_of(xprt, struct sock_xprt, xprt);
	transport->xprt_err = -sk->sk_err;
	if (transport->xprt_err == 0)
		return;
	dprintk("RPC: xs_error_report client %p, error=%d...\n",
		xprt, -transport->xprt_err);
	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);

	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
	smp_mb__before_atomic();
	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
}

static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;
	struct rpc_xprt *xprt = &transport->xprt;
	struct file *filp = transport->file;

	if (sk == NULL)
		return;
	/*
	 * Make sure we're calling this in a context from which it is safe
	 * to call __fput_sync(). In practice that means rpciod and the
	 * system workqueue.
	 */
	if (!(current->flags & PF_WQ_WORKER)) {
		WARN_ON_ONCE(1);
		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
		return;
	}

	if (atomic_read(&transport->xprt.swapper))
		sk_clear_memalloc(sk);

	tls_handshake_cancel(sk);

	kernel_sock_shutdown(sock, SHUT_RDWR);

	mutex_lock(&transport->recv_mutex);
	lock_sock(sk);
	transport->inet = NULL;
	transport->sock = NULL;
	transport->file = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	xprt_clear_connected(xprt);
	xs_sock_reset_connection_flags(xprt);
	/* Reset stream record info */
	xs_stream_reset_connect(transport);
	release_sock(sk);
	mutex_unlock(&transport->recv_mutex);

	trace_rpc_socket_close(xprt, sock);
	__fput_sync(filp);

	xprt_disconnect_done(xprt);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
 *
 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 * xs_reset_transport() zeroing the socket from underneath a writer.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: xs_close xprt %p\n", xprt);

	if (transport->sock)
		tls_handshake_close(transport->sock);
	xs_reset_transport(transport);
	xprt->reestablish_timeout = 0;
}

static void xs_inject_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: injecting transport disconnect on xprt=%p\n",
		xprt);
	xprt_disconnect_done(xprt);
}

static void xs_xprt_free(struct rpc_xprt *xprt)
{
	xs_free_peer_addresses(xprt);
	xprt_free(xprt);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt,
			struct sock_xprt, xprt);
	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_delayed_work_sync(&transport->connect_worker);
	xs_close(xprt);
	cancel_work_sync(&transport->recv_worker);
	cancel_work_sync(&transport->error_worker);
	xs_xprt_free(xprt);
	module_put(THIS_MODULE);
}

/**
 * xs_udp_data_read_skb - receive callback for UDP sockets
 * @xprt: transport
 * @sk: socket
 * @skb: skbuff
 *
 */
static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
		struct sock *sk,
		struct sk_buff *skb)
{
	struct rpc_task *task;
	struct rpc_rqst *rovr;
	int repsize, copied;
	u32 _xid;
	__be32 *xp;

	repsize = skb->len;
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		return;
	}

	/* Copy the XID from the skb...
	 */
	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
	if (xp == NULL)
		return;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->queue_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	xprt_pin_rqst(rovr);
	xprt_update_rtt(rovr->rq_task);
	spin_unlock(&xprt->queue_lock);
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		spin_lock(&xprt->queue_lock);
		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
		goto out_unpin;
	}


	spin_lock(&xprt->transport_lock);
	xprt_adjust_cwnd(xprt, task, copied);
	spin_unlock(&xprt->transport_lock);
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(task, copied);
	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
out_unpin:
	xprt_unpin_rqst(rovr);
out_unlock:
	spin_unlock(&xprt->queue_lock);
}

static void xs_udp_data_receive(struct sock_xprt *transport)
{
	struct sk_buff *skb;
	struct sock *sk;
	int err;

	mutex_lock(&transport->recv_mutex);
	sk = transport->inet;
	if (sk == NULL)
		goto out;
	for (;;) {
		skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
		if (skb == NULL)
			break;
		xs_udp_data_read_skb(&transport->xprt, sk, skb);
		consume_skb(skb);
		cond_resched();
	}
	xs_poll_check_readable(transport);
out:
	mutex_unlock(&transport->recv_mutex);
}

static void xs_udp_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	unsigned int pflags = memalloc_nofs_save();

	xs_udp_data_receive(transport);
	memalloc_nofs_restore(pflags);
}

/**
 * xs_data_ready - "data ready" callback for sockets
 * @sk: socket with data to read
 *
 */
static void xs_data_ready(struct sock *sk)
{
	struct rpc_xprt *xprt;

	trace_sk_data_ready(sk);

	xprt = xprt_from_sock(sk);
	if (xprt != NULL) {
		struct sock_xprt *transport = container_of(xprt,
				struct sock_xprt, xprt);

		trace_xs_data_ready(xprt);

		transport->old_data_ready(sk);

		if (test_bit(XPRT_SOCK_IGNORE_RECV, &transport->sock_state))
			return;

		/* Any data means we had a useful conversation, so
		 * then we don't need to delay the next reconnect
		 */
		if (xprt->reestablish_timeout)
			xprt->reestablish_timeout = 0;
		if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
			queue_work(xprtiod_workqueue, &transport->recv_worker);
	}
}

/*
 * Helper function to force a TCP close if the server is sending
 * junk and/or it has put us in CLOSE_WAIT
 */
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
	xprt_force_disconnect(xprt);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
{
	return PAGE_SIZE;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * xs_local_state_change - callback to handle AF_LOCAL socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_local_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct
		sock_xprt *transport;

	if (!(xprt = xprt_from_sock(sk)))
		return;
	transport = container_of(xprt, struct sock_xprt, xprt);
	if (sk->sk_shutdown & SHUTDOWN_MASK) {
		clear_bit(XPRT_CONNECTED, &xprt->state);
		/* Trigger the socket release */
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
	}
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	if (!(xprt = xprt_from_sock(sk)))
		return;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED),
			sk->sk_shutdown);

	transport = container_of(xprt, struct sock_xprt, xprt);
	trace_rpc_socket_state_change(xprt, sk->sk_socket);
	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		if (!xprt_test_and_set_connected(xprt)) {
			xprt->connect_cookie++;
			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
			xprt_clear_connecting(xprt);

			xprt->stat.connect_count++;
			xprt->stat.connect_time += (long)jiffies -
						   xprt->stat.connect_start;
			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
		}
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt->connect_cookie++;
		clear_bit(XPRT_CONNECTED, &xprt->state);
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
		fallthrough;
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE:
		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
				       &transport->sock_state))
			xprt_clear_connecting(xprt);
		clear_bit(XPRT_CLOSING, &xprt->state);
		/* Trigger the socket release */
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
	}
}

static void xs_write_space(struct sock *sk)
{
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;

	if (!sk->sk_socket)
		return;
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	transport = container_of(xprt, struct sock_xprt, xprt);
	if (!test_and_clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state))
		return;
	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
	sk->sk_write_pending--;
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space
 * is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk))
		xs_write_space(sk);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_is_writeable(sk))
		xs_write_space(sk);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	transport->sndsize = 0;
	if (sndsize)
		transport->sndsize = sndsize + 1024;
	transport->rcvsize = 0;
	if (rcvsize)
		transport->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @xprt: controlling transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock(&xprt->transport_lock);
	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
	spin_unlock(&xprt->transport_lock);
}

static int xs_get_random_port(void)
{
	unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
	unsigned short range;
	unsigned short rand;

	if (max < min)
		return -EADDRINUSE;
	range = max - min + 1;
	rand = get_random_u32_below(range);
	return rand + min;
}

static unsigned short xs_sock_getport(struct socket *sock)
{
	struct sockaddr_storage buf;
	unsigned short port = 0;

	if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
		goto out;
	switch (buf.ss_family) {
	case AF_INET6:
		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
		break;
	case AF_INET:
		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
	}
out:
	return port;
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}

static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
{
	if (transport->srcport == 0 && transport->xprt.reuseport)
		transport->srcport = xs_sock_getport(sock);
}

static int xs_get_srcport(struct sock_xprt *transport)
{
	int port = transport->srcport;

	if (port == 0 && transport->xprt.resvport)
		port = xs_get_random_port();
	return port;
}

static unsigned short xs_sock_srcport(struct rpc_xprt *xprt)
{
	struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
	unsigned short ret = 0;
	mutex_lock(&sock->recv_mutex);
	if (sock->sock)
		ret = xs_sock_getport(sock->sock);
	mutex_unlock(&sock->recv_mutex);
	return ret;
}

static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen)
{
	struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
	union {
		struct sockaddr sa;
		struct sockaddr_storage st;
	} saddr;
	int ret = -ENOTCONN;

	mutex_lock(&sock->recv_mutex);
	if (sock->sock) {
		ret = kernel_getsockname(sock->sock, &saddr.sa);
		if (ret >= 0)
			ret = snprintf(buf, buflen, "%pISc", &saddr.sa);
	}
	mutex_unlock(&sock->recv_mutex);
	return ret;
}

static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
	if (transport->srcport != 0)
		transport->srcport = 0;
	if (!transport->xprt.resvport)
		return 0;
	if (port <= xprt_min_resvport || port > xprt_max_resvport)
		return xprt_max_resvport;
	return --port;
}
static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_storage myaddr;
	int err, nloop = 0;
	int port = xs_get_srcport(transport);
	unsigned short last;

	/*
	 * If we are asking for any ephemeral port (i.e. port == 0 &&
	 * transport->xprt.resvport == 0), don't bind. Let the local
	 * port selection happen implicitly when the socket is used
	 * (for example at connect time).
	 *
	 * This ensures that we can continue to establish TCP
	 * connections even when all local ephemeral ports are already
	 * a part of some TCP connection. This makes no difference
	 * for UDP sockets, but also doesn't harm them.
	 *
	 * If we're asking for any reserved port (i.e. port == 0 &&
	 * transport->xprt.resvport == 1) xs_get_srcport above will
	 * ensure that port is non-zero and we will bind as needed.
	 */
	if (port <= 0)
		return port;

	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				transport->xprt.addrlen);
		if (err == 0) {
			if (transport->xprt.reuseport)
				transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);

	if (myaddr.ss_family == AF_INET)
		dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in *)&myaddr)->sin_addr,
				port, err ? "failed" : "ok", err);
	else
		dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
				port, err ? "failed" : "ok", err);
	return err;
}

/*
 * We don't support autobind on AF_LOCAL sockets
 */
static void xs_local_rpcbind(struct rpc_task *task)
{
	xprt_set_bound(task->tk_xprt);
}

static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[3];
static struct lock_class_key xs_slock_key[3];

static inline void xs_reclassify_socketu(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
		&xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]);
}

static inline void xs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
		&xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]);
}

static inline void xs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
		&xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]);
}

static inline void xs_reclassify_socket(int family, struct socket *sock)
{
	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
		return;

	switch (family) {
	case AF_LOCAL:
		xs_reclassify_socketu(sock);
		break;
	case AF_INET:
		xs_reclassify_socket4(sock);
		break;
	case AF_INET6:
		xs_reclassify_socket6(sock);
		break;
	}
}
#else
static inline void xs_reclassify_socket(int family, struct socket *sock)
{
}
#endif

static void xs_dummy_setup_socket(struct work_struct *work)
{
}

static struct socket *xs_create_sock(struct rpc_xprt *xprt,
		struct sock_xprt *transport, int family, int type,
		int protocol, bool reuseport)
{
	struct file *filp;
	struct socket *sock;
	int err;

	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
	if (err < 0) {
		dprintk("RPC: can't create %d transport socket (%d).\n",
				protocol, -err);
		goto out;
	}

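	/* Descriptive note (added): the freshly created socket below gets its
	 * lockdep class, optional SO_REUSEPORT, and a bound source port
	 * before it is wrapped in a struct file.
	 */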
	xs_reclassify_socket(family, sock);

	if (reuseport)
		sock_set_reuseport(sock->sk);

	err = xs_bind(transport, sock);
	if (err) {
		sock_release(sock);
		goto out;
	}

	if (protocol == IPPROTO_TCP) {
		__netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false);
		sock->sk->sk_net_refcnt = 1;
		get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL);
		sock_inuse_add(xprt->xprt_net, 1);
	}

	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
	if (IS_ERR(filp))
		return ERR_CAST(filp);
	transport->file = filp;

	return sock;
out:
	return ERR_PTR(err);
}

static int xs_local_finish_connecting(struct rpc_xprt *xprt,
				      struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
									xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		lock_sock(sk);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_state_change = xs_local_state_change;
		sk->sk_error_report = xs_error_report;
		sk->sk_use_task_frag = false;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		release_sock(sk);
	}

	xs_stream_start_connect(transport);

	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
}

/**
 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
 * @transport: socket transport to connect
 */
static int xs_local_setup_socket(struct sock_xprt *transport)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct file *filp;
	struct socket *sock;
	int status;

	status = __sock_create(xprt->xprt_net, AF_LOCAL,
					SOCK_STREAM, 0, &sock, 1);
	if (status < 0) {
		dprintk("RPC: can't create AF_LOCAL "
			"transport socket (%d).\n", -status);
		goto out;
	}
	xs_reclassify_socket(AF_LOCAL, sock);

	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
	if (IS_ERR(filp)) {
		status = PTR_ERR(filp);
		goto out;
	}
	transport->file = filp;

	dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);

	status = xs_local_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, status);
	switch (status) {
	case 0:
		dprintk("RPC: xprt %p connected to %s\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies -
					   xprt->stat.connect_start;
		xprt_set_connected(xprt);
		break;
	case -ENOBUFS:
		break;
	case -ENOENT:
		dprintk("RPC: xprt %p: socket %s does not exist\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		break;
	case -ECONNREFUSED:
		dprintk("RPC: xprt %p: connection refused for %s\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		break;
	default:
		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
				__func__, -status,
				xprt->address_strings[RPC_DISPLAY_ADDR]);
	}

out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
	return status;
}

static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct sock_xprt
*transport = container_of(xprt, struct sock_xprt, xprt);
2041 int ret;
2042
2043 if (transport->file)
2044 goto force_disconnect;
2045
2046 if (RPC_IS_ASYNC(task)) {
2047 /*
2048 * We want the AF_LOCAL connect to be resolved in the
2049 * filesystem namespace of the process making the rpc
2050 * call. Thus we connect synchronously.
2051 *
2052 * If we want to support asynchronous AF_LOCAL calls,
2053 * we'll need to figure out how to pass a namespace to
2054 * connect.
2055 */
2056 rpc_task_set_rpc_status(task, -ENOTCONN);
2057 goto out_wake;
2058 }
2059 ret = xs_local_setup_socket(transport);
2060 if (ret && !RPC_IS_SOFTCONN(task))
2061 msleep_interruptible(15000);
2062 return;
2063 force_disconnect:
2064 xprt_force_disconnect(xprt);
2065 out_wake:
2066 xprt_clear_connecting(xprt);
2067 xprt_wake_pending_tasks(xprt, -ENOTCONN);
2068 }
2069
2070 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2071 /*
2072 * Note that this should be called with XPRT_LOCKED held, or recv_mutex
2073 * held, or when we otherwise know that we have exclusive access to the
2074 * socket, to guard against races with xs_reset_transport.
2075 */
2076 static void xs_set_memalloc(struct rpc_xprt *xprt)
2077 {
2078 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2079 xprt);
2080
2081 /*
2082 * If there's no sock, then we have nothing to set. The
2083 * reconnecting process will get it for us.
2084 */
2085 if (!transport->inet)
2086 return;
2087 if (atomic_read(&xprt->swapper))
2088 sk_set_memalloc(transport->inet);
2089 }
2090
2091 /**
2092 * xs_enable_swap - Tag this transport as being used for swap.
2093 * @xprt: transport to tag
2094 *
2095 * Take a reference to this transport on behalf of the rpc_clnt, and
2096 * optionally mark it for swapping if it wasn't already.
2097 */
2098 static int
2099 xs_enable_swap(struct rpc_xprt *xprt)
2100 {
2101 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2102
2103 mutex_lock(&xs->recv_mutex);
2104 if (atomic_inc_return(&xprt->swapper) == 1 &&
2105 xs->inet)
2106 sk_set_memalloc(xs->inet);
2107 mutex_unlock(&xs->recv_mutex);
2108 return 0;
2109 }
2110
2111 /**
2112 * xs_disable_swap - Untag this transport as being used for swap.
2113 * @xprt: transport to untag
2114 *
2115 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2116 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2117 */ 2118 static void 2119 xs_disable_swap(struct rpc_xprt *xprt) 2120 { 2121 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2122 2123 mutex_lock(&xs->recv_mutex); 2124 if (atomic_dec_and_test(&xprt->swapper) && 2125 xs->inet) 2126 sk_clear_memalloc(xs->inet); 2127 mutex_unlock(&xs->recv_mutex); 2128 } 2129 #else 2130 static void xs_set_memalloc(struct rpc_xprt *xprt) 2131 { 2132 } 2133 2134 static int 2135 xs_enable_swap(struct rpc_xprt *xprt) 2136 { 2137 return -EINVAL; 2138 } 2139 2140 static void 2141 xs_disable_swap(struct rpc_xprt *xprt) 2142 { 2143 } 2144 #endif 2145 2146 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2147 { 2148 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2149 2150 if (!transport->inet) { 2151 struct sock *sk = sock->sk; 2152 2153 lock_sock(sk); 2154 2155 xs_save_old_callbacks(transport, sk); 2156 2157 sk->sk_user_data = xprt; 2158 sk->sk_data_ready = xs_data_ready; 2159 sk->sk_write_space = xs_udp_write_space; 2160 sk->sk_use_task_frag = false; 2161 2162 xprt_set_connected(xprt); 2163 2164 /* Reset to new socket */ 2165 transport->sock = sock; 2166 transport->inet = sk; 2167 2168 xs_set_memalloc(xprt); 2169 2170 release_sock(sk); 2171 } 2172 xs_udp_do_set_buffer_size(xprt); 2173 2174 xprt->stat.connect_start = jiffies; 2175 } 2176 2177 static void xs_udp_setup_socket(struct work_struct *work) 2178 { 2179 struct sock_xprt *transport = 2180 container_of(work, struct sock_xprt, connect_worker.work); 2181 struct rpc_xprt *xprt = &transport->xprt; 2182 struct socket *sock; 2183 int status = -EIO; 2184 unsigned int pflags = current->flags; 2185 2186 if (atomic_read(&xprt->swapper)) 2187 current->flags |= PF_MEMALLOC; 2188 sock = xs_create_sock(xprt, transport, 2189 xs_addr(xprt)->sa_family, SOCK_DGRAM, 2190 IPPROTO_UDP, false); 2191 if (IS_ERR(sock)) 2192 goto out; 2193 2194 dprintk("RPC: worker connecting xprt %p via %s to " 2195 "%s (port %s)\n", xprt, 2196 xprt->address_strings[RPC_DISPLAY_PROTO], 2197 xprt->address_strings[RPC_DISPLAY_ADDR], 2198 xprt->address_strings[RPC_DISPLAY_PORT]); 2199 2200 xs_udp_finish_connecting(xprt, sock); 2201 trace_rpc_socket_connect(xprt, sock, 0); 2202 status = 0; 2203 out: 2204 xprt_clear_connecting(xprt); 2205 xprt_unlock_connect(xprt, transport); 2206 xprt_wake_pending_tasks(xprt, status); 2207 current_restore_flags(pflags, PF_MEMALLOC); 2208 } 2209 2210 /** 2211 * xs_tcp_shutdown - gracefully shut down a TCP socket 2212 * @xprt: transport 2213 * 2214 * Initiates a graceful shutdown of the TCP socket by calling the 2215 * equivalent of shutdown(SHUT_RDWR); 2216 */ 2217 static void xs_tcp_shutdown(struct rpc_xprt *xprt) 2218 { 2219 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2220 struct socket *sock = transport->sock; 2221 int skst = transport->inet ? 
transport->inet->sk_state : TCP_CLOSE; 2222 2223 if (sock == NULL) 2224 return; 2225 if (!xprt->reuseport) { 2226 xs_close(xprt); 2227 return; 2228 } 2229 switch (skst) { 2230 case TCP_FIN_WAIT1: 2231 case TCP_FIN_WAIT2: 2232 case TCP_LAST_ACK: 2233 break; 2234 case TCP_ESTABLISHED: 2235 case TCP_CLOSE_WAIT: 2236 kernel_sock_shutdown(sock, SHUT_RDWR); 2237 trace_rpc_socket_shutdown(xprt, sock); 2238 break; 2239 default: 2240 xs_reset_transport(transport); 2241 } 2242 } 2243 2244 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, 2245 struct socket *sock) 2246 { 2247 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2248 struct net *net = sock_net(sock->sk); 2249 unsigned long connect_timeout; 2250 unsigned long syn_retries; 2251 unsigned int keepidle; 2252 unsigned int keepcnt; 2253 unsigned int timeo; 2254 unsigned long t; 2255 2256 spin_lock(&xprt->transport_lock); 2257 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); 2258 keepcnt = xprt->timeout->to_retries + 1; 2259 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2260 (xprt->timeout->to_retries + 1); 2261 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2262 spin_unlock(&xprt->transport_lock); 2263 2264 /* TCP Keepalive options */ 2265 sock_set_keepalive(sock->sk); 2266 tcp_sock_set_keepidle(sock->sk, keepidle); 2267 tcp_sock_set_keepintvl(sock->sk, keepidle); 2268 tcp_sock_set_keepcnt(sock->sk, keepcnt); 2269 2270 /* TCP user timeout (see RFC5482) */ 2271 tcp_sock_set_user_timeout(sock->sk, timeo); 2272 2273 /* Connect timeout */ 2274 connect_timeout = max_t(unsigned long, 2275 DIV_ROUND_UP(xprt->connect_timeout, HZ), 1); 2276 syn_retries = max_t(unsigned long, 2277 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries), 1); 2278 for (t = 0; t <= syn_retries && (1UL << t) < connect_timeout; t++) 2279 ; 2280 if (t <= syn_retries) 2281 tcp_sock_set_syncnt(sock->sk, t - 1); 2282 } 2283 2284 static void xs_tcp_do_set_connect_timeout(struct rpc_xprt *xprt, 2285 unsigned long connect_timeout) 2286 { 2287 struct sock_xprt *transport = 2288 container_of(xprt, struct sock_xprt, xprt); 2289 struct rpc_timeout to; 2290 unsigned long initval; 2291 2292 memcpy(&to, xprt->timeout, sizeof(to)); 2293 /* Arbitrary lower limit */ 2294 initval = max_t(unsigned long, connect_timeout, XS_TCP_INIT_REEST_TO); 2295 to.to_initval = initval; 2296 to.to_maxval = initval; 2297 to.to_retries = 0; 2298 memcpy(&transport->tcp_timeout, &to, sizeof(transport->tcp_timeout)); 2299 xprt->timeout = &transport->tcp_timeout; 2300 xprt->connect_timeout = connect_timeout; 2301 } 2302 2303 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, 2304 unsigned long connect_timeout, 2305 unsigned long reconnect_timeout) 2306 { 2307 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2308 2309 spin_lock(&xprt->transport_lock); 2310 if (reconnect_timeout < xprt->max_reconnect_timeout) 2311 xprt->max_reconnect_timeout = reconnect_timeout; 2312 if (connect_timeout < xprt->connect_timeout) 2313 xs_tcp_do_set_connect_timeout(xprt, connect_timeout); 2314 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2315 spin_unlock(&xprt->transport_lock); 2316 } 2317 2318 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2319 { 2320 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2321 2322 if (!transport->inet) { 2323 struct sock *sk = sock->sk; 2324 2325 /* Avoid temporary address, they are bad for long-lived 2326 * connections such as NFS mounts. 
2327 * RFC4941, section 3.6 suggests that: 2328 * Individual applications, which have specific 2329 * knowledge about the normal duration of connections, 2330 * MAY override this as appropriate. 2331 */ 2332 if (xs_addr(xprt)->sa_family == PF_INET6) { 2333 ip6_sock_set_addr_preferences(sk, 2334 IPV6_PREFER_SRC_PUBLIC); 2335 } 2336 2337 xs_tcp_set_socket_timeouts(xprt, sock); 2338 tcp_sock_set_nodelay(sk); 2339 2340 lock_sock(sk); 2341 2342 xs_save_old_callbacks(transport, sk); 2343 2344 sk->sk_user_data = xprt; 2345 sk->sk_data_ready = xs_data_ready; 2346 sk->sk_state_change = xs_tcp_state_change; 2347 sk->sk_write_space = xs_tcp_write_space; 2348 sk->sk_error_report = xs_error_report; 2349 sk->sk_use_task_frag = false; 2350 2351 /* socket options */ 2352 sock_reset_flag(sk, SOCK_LINGER); 2353 2354 xprt_clear_connected(xprt); 2355 2356 /* Reset to new socket */ 2357 transport->sock = sock; 2358 transport->inet = sk; 2359 2360 release_sock(sk); 2361 } 2362 2363 if (!xprt_bound(xprt)) 2364 return -ENOTCONN; 2365 2366 xs_set_memalloc(xprt); 2367 2368 xs_stream_start_connect(transport); 2369 2370 /* Tell the socket layer to start connecting... */ 2371 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 2372 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2373 } 2374 2375 /** 2376 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint 2377 * @work: queued work item 2378 * 2379 * Invoked by a work queue tasklet. 2380 */ 2381 static void xs_tcp_setup_socket(struct work_struct *work) 2382 { 2383 struct sock_xprt *transport = 2384 container_of(work, struct sock_xprt, connect_worker.work); 2385 struct socket *sock = transport->sock; 2386 struct rpc_xprt *xprt = &transport->xprt; 2387 int status; 2388 unsigned int pflags = current->flags; 2389 2390 if (atomic_read(&xprt->swapper)) 2391 current->flags |= PF_MEMALLOC; 2392 2393 if (xprt_connected(xprt)) 2394 goto out; 2395 if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT, 2396 &transport->sock_state) || 2397 !sock) { 2398 xs_reset_transport(transport); 2399 sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, 2400 SOCK_STREAM, IPPROTO_TCP, true); 2401 if (IS_ERR(sock)) { 2402 xprt_wake_pending_tasks(xprt, PTR_ERR(sock)); 2403 goto out; 2404 } 2405 } 2406 2407 dprintk("RPC: worker connecting xprt %p via %s to " 2408 "%s (port %s)\n", xprt, 2409 xprt->address_strings[RPC_DISPLAY_PROTO], 2410 xprt->address_strings[RPC_DISPLAY_ADDR], 2411 xprt->address_strings[RPC_DISPLAY_PORT]); 2412 2413 status = xs_tcp_finish_connecting(xprt, sock); 2414 trace_rpc_socket_connect(xprt, sock, status); 2415 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 2416 xprt, -status, xprt_connected(xprt), 2417 sock->sk->sk_state); 2418 switch (status) { 2419 case 0: 2420 case -EINPROGRESS: 2421 /* SYN_SENT! */ 2422 set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state); 2423 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2424 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2425 fallthrough; 2426 case -EALREADY: 2427 goto out_unlock; 2428 case -EADDRNOTAVAIL: 2429 /* Source port number is unavailable. Try a new one! */ 2430 transport->srcport = 0; 2431 status = -EAGAIN; 2432 break; 2433 case -EPERM: 2434 /* Happens, for instance, if a BPF program is preventing 2435 * the connect. Remap the error so upper layers can better 2436 * deal with it. 
2437 */ 2438 status = -ECONNREFUSED; 2439 fallthrough; 2440 case -EINVAL: 2441 /* Happens, for instance, if the user specified a link 2442 * local IPv6 address without a scope-id. 2443 */ 2444 case -ECONNREFUSED: 2445 case -ECONNRESET: 2446 case -ENETDOWN: 2447 case -ENETUNREACH: 2448 case -EHOSTUNREACH: 2449 case -EADDRINUSE: 2450 case -ENOBUFS: 2451 case -ENOTCONN: 2452 break; 2453 default: 2454 printk("%s: connect returned unhandled error %d\n", 2455 __func__, status); 2456 status = -EAGAIN; 2457 } 2458 2459 /* xs_tcp_force_close() wakes tasks with a fixed error code. 2460 * We need to wake them first to ensure the correct error code. 2461 */ 2462 xprt_wake_pending_tasks(xprt, status); 2463 xs_tcp_force_close(xprt); 2464 out: 2465 xprt_clear_connecting(xprt); 2466 out_unlock: 2467 xprt_unlock_connect(xprt, transport); 2468 current_restore_flags(pflags, PF_MEMALLOC); 2469 } 2470 2471 /* 2472 * Transfer the connected socket to @upper_transport, then mark that 2473 * xprt CONNECTED. 2474 */ 2475 static int xs_tcp_tls_finish_connecting(struct rpc_xprt *lower_xprt, 2476 struct sock_xprt *upper_transport) 2477 { 2478 struct sock_xprt *lower_transport = 2479 container_of(lower_xprt, struct sock_xprt, xprt); 2480 struct rpc_xprt *upper_xprt = &upper_transport->xprt; 2481 2482 if (!upper_transport->inet) { 2483 struct socket *sock = lower_transport->sock; 2484 struct sock *sk = sock->sk; 2485 2486 /* Avoid temporary address, they are bad for long-lived 2487 * connections such as NFS mounts. 2488 * RFC4941, section 3.6 suggests that: 2489 * Individual applications, which have specific 2490 * knowledge about the normal duration of connections, 2491 * MAY override this as appropriate. 2492 */ 2493 if (xs_addr(upper_xprt)->sa_family == PF_INET6) 2494 ip6_sock_set_addr_preferences(sk, IPV6_PREFER_SRC_PUBLIC); 2495 2496 xs_tcp_set_socket_timeouts(upper_xprt, sock); 2497 tcp_sock_set_nodelay(sk); 2498 2499 lock_sock(sk); 2500 2501 /* @sk is already connected, so it now has the RPC callbacks. 2502 * Reach into @lower_transport to save the original ones. 
2503 */ 2504 upper_transport->old_data_ready = lower_transport->old_data_ready; 2505 upper_transport->old_state_change = lower_transport->old_state_change; 2506 upper_transport->old_write_space = lower_transport->old_write_space; 2507 upper_transport->old_error_report = lower_transport->old_error_report; 2508 sk->sk_user_data = upper_xprt; 2509 2510 /* socket options */ 2511 sock_reset_flag(sk, SOCK_LINGER); 2512 2513 xprt_clear_connected(upper_xprt); 2514 2515 upper_transport->sock = sock; 2516 upper_transport->inet = sk; 2517 upper_transport->file = lower_transport->file; 2518 2519 release_sock(sk); 2520 2521 /* Reset lower_transport before shutting down its clnt */ 2522 mutex_lock(&lower_transport->recv_mutex); 2523 lower_transport->inet = NULL; 2524 lower_transport->sock = NULL; 2525 lower_transport->file = NULL; 2526 2527 xprt_clear_connected(lower_xprt); 2528 xs_sock_reset_connection_flags(lower_xprt); 2529 xs_stream_reset_connect(lower_transport); 2530 mutex_unlock(&lower_transport->recv_mutex); 2531 } 2532 2533 if (!xprt_bound(upper_xprt)) 2534 return -ENOTCONN; 2535 2536 xs_set_memalloc(upper_xprt); 2537 2538 if (!xprt_test_and_set_connected(upper_xprt)) { 2539 upper_xprt->connect_cookie++; 2540 clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2541 xprt_clear_connecting(upper_xprt); 2542 2543 upper_xprt->stat.connect_count++; 2544 upper_xprt->stat.connect_time += (long)jiffies - 2545 upper_xprt->stat.connect_start; 2546 xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2547 } 2548 return 0; 2549 } 2550 2551 /** 2552 * xs_tls_handshake_done - TLS handshake completion handler 2553 * @data: address of xprt to wake 2554 * @status: status of handshake 2555 * @peerid: serial number of key containing the remote's identity 2556 * 2557 */ 2558 static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid) 2559 { 2560 struct rpc_xprt *lower_xprt = data; 2561 struct sock_xprt *lower_transport = 2562 container_of(lower_xprt, struct sock_xprt, xprt); 2563 2564 lower_transport->xprt_err = status ? 
-EACCES : 0; 2565 complete(&lower_transport->handshake_done); 2566 xprt_put(lower_xprt); 2567 } 2568 2569 static int xs_tls_handshake_sync(struct rpc_xprt *lower_xprt, struct xprtsec_parms *xprtsec) 2570 { 2571 struct sock_xprt *lower_transport = 2572 container_of(lower_xprt, struct sock_xprt, xprt); 2573 struct tls_handshake_args args = { 2574 .ta_sock = lower_transport->sock, 2575 .ta_done = xs_tls_handshake_done, 2576 .ta_data = xprt_get(lower_xprt), 2577 .ta_peername = lower_xprt->servername, 2578 }; 2579 struct sock *sk = lower_transport->inet; 2580 int rc; 2581 2582 init_completion(&lower_transport->handshake_done); 2583 set_bit(XPRT_SOCK_IGNORE_RECV, &lower_transport->sock_state); 2584 lower_transport->xprt_err = -ETIMEDOUT; 2585 switch (xprtsec->policy) { 2586 case RPC_XPRTSEC_TLS_ANON: 2587 rc = tls_client_hello_anon(&args, GFP_KERNEL); 2588 if (rc) 2589 goto out_put_xprt; 2590 break; 2591 case RPC_XPRTSEC_TLS_X509: 2592 args.ta_my_cert = xprtsec->cert_serial; 2593 args.ta_my_privkey = xprtsec->privkey_serial; 2594 rc = tls_client_hello_x509(&args, GFP_KERNEL); 2595 if (rc) 2596 goto out_put_xprt; 2597 break; 2598 default: 2599 rc = -EACCES; 2600 goto out_put_xprt; 2601 } 2602 2603 rc = wait_for_completion_interruptible_timeout(&lower_transport->handshake_done, 2604 XS_TLS_HANDSHAKE_TO); 2605 if (rc <= 0) { 2606 tls_handshake_cancel(sk); 2607 if (rc == 0) 2608 rc = -ETIMEDOUT; 2609 goto out_put_xprt; 2610 } 2611 2612 rc = lower_transport->xprt_err; 2613 2614 out: 2615 xs_stream_reset_connect(lower_transport); 2616 clear_bit(XPRT_SOCK_IGNORE_RECV, &lower_transport->sock_state); 2617 return rc; 2618 2619 out_put_xprt: 2620 xprt_put(lower_xprt); 2621 goto out; 2622 } 2623 2624 /** 2625 * xs_tcp_tls_setup_socket - establish a TLS session on a TCP socket 2626 * @work: queued work item 2627 * 2628 * Invoked by a work queue tasklet. 2629 * 2630 * For RPC-with-TLS, there is a two-stage connection process. 2631 * 2632 * The "upper-layer xprt" is visible to the RPC consumer. Once it has 2633 * been marked connected, the consumer knows that a TCP connection and 2634 * a TLS session have been established. 2635 * 2636 * A "lower-layer xprt", created in this function, handles the mechanics 2637 * of connecting the TCP socket, performing the RPC_AUTH_TLS probe, and 2638 * then driving the TLS handshake. Once all that is complete, the upper 2639 * layer xprt is marked connected. 
2640 */ 2641 static void xs_tcp_tls_setup_socket(struct work_struct *work) 2642 { 2643 struct sock_xprt *upper_transport = 2644 container_of(work, struct sock_xprt, connect_worker.work); 2645 struct rpc_clnt *upper_clnt = upper_transport->clnt; 2646 struct rpc_xprt *upper_xprt = &upper_transport->xprt; 2647 struct rpc_create_args args = { 2648 .net = upper_xprt->xprt_net, 2649 .protocol = upper_xprt->prot, 2650 .address = (struct sockaddr *)&upper_xprt->addr, 2651 .addrsize = upper_xprt->addrlen, 2652 .timeout = upper_clnt->cl_timeout, 2653 .servername = upper_xprt->servername, 2654 .program = upper_clnt->cl_program, 2655 .prognumber = upper_clnt->cl_prog, 2656 .version = upper_clnt->cl_vers, 2657 .authflavor = RPC_AUTH_TLS, 2658 .cred = upper_clnt->cl_cred, 2659 .xprtsec = { 2660 .policy = RPC_XPRTSEC_NONE, 2661 }, 2662 .stats = upper_clnt->cl_stats, 2663 }; 2664 unsigned int pflags = current->flags; 2665 struct rpc_clnt *lower_clnt; 2666 struct rpc_xprt *lower_xprt; 2667 int status; 2668 2669 if (atomic_read(&upper_xprt->swapper)) 2670 current->flags |= PF_MEMALLOC; 2671 2672 xs_stream_start_connect(upper_transport); 2673 2674 /* This implicitly sends an RPC_AUTH_TLS probe */ 2675 lower_clnt = rpc_create(&args); 2676 if (IS_ERR(lower_clnt)) { 2677 trace_rpc_tls_unavailable(upper_clnt, upper_xprt); 2678 clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2679 xprt_clear_connecting(upper_xprt); 2680 xprt_wake_pending_tasks(upper_xprt, PTR_ERR(lower_clnt)); 2681 xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2682 goto out_unlock; 2683 } 2684 2685 /* RPC_AUTH_TLS probe was successful. Try a TLS handshake on 2686 * the lower xprt. 2687 */ 2688 rcu_read_lock(); 2689 lower_xprt = rcu_dereference(lower_clnt->cl_xprt); 2690 rcu_read_unlock(); 2691 2692 if (wait_on_bit_lock(&lower_xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 2693 goto out_unlock; 2694 2695 status = xs_tls_handshake_sync(lower_xprt, &upper_xprt->xprtsec); 2696 if (status) { 2697 trace_rpc_tls_not_started(upper_clnt, upper_xprt); 2698 goto out_close; 2699 } 2700 2701 status = xs_tcp_tls_finish_connecting(lower_xprt, upper_transport); 2702 if (status) 2703 goto out_close; 2704 xprt_release_write(lower_xprt, NULL); 2705 2706 trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0); 2707 if (!xprt_test_and_set_connected(upper_xprt)) { 2708 upper_xprt->connect_cookie++; 2709 clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2710 xprt_clear_connecting(upper_xprt); 2711 2712 upper_xprt->stat.connect_count++; 2713 upper_xprt->stat.connect_time += (long)jiffies - 2714 upper_xprt->stat.connect_start; 2715 xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2716 } 2717 rpc_shutdown_client(lower_clnt); 2718 2719 out_unlock: 2720 current_restore_flags(pflags, PF_MEMALLOC); 2721 upper_transport->clnt = NULL; 2722 xprt_unlock_connect(upper_xprt, upper_transport); 2723 return; 2724 2725 out_close: 2726 xprt_release_write(lower_xprt, NULL); 2727 rpc_shutdown_client(lower_clnt); 2728 2729 /* xprt_force_disconnect() wakes tasks with a fixed tk_status code. 2730 * Wake them first here to ensure they get our tk_status code. 
2731 */ 2732 xprt_wake_pending_tasks(upper_xprt, status); 2733 xs_tcp_force_close(upper_xprt); 2734 xprt_clear_connecting(upper_xprt); 2735 goto out_unlock; 2736 } 2737 2738 /** 2739 * xs_connect - connect a socket to a remote endpoint 2740 * @xprt: pointer to transport structure 2741 * @task: address of RPC task that manages state of connect request 2742 * 2743 * TCP: If the remote end dropped the connection, delay reconnecting. 2744 * 2745 * UDP socket connects are synchronous, but we use a work queue anyway 2746 * to guarantee that even unprivileged user processes can set up a 2747 * socket on a privileged port. 2748 * 2749 * If a UDP socket connect fails, the delay behavior here prevents 2750 * retry floods (hard mounts). 2751 */ 2752 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2753 { 2754 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2755 unsigned long delay = 0; 2756 2757 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2758 2759 if (transport->sock != NULL) { 2760 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2761 "seconds\n", xprt, xprt->reestablish_timeout / HZ); 2762 2763 delay = xprt_reconnect_delay(xprt); 2764 xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO); 2765 2766 } else 2767 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2768 2769 transport->clnt = task->tk_client; 2770 queue_delayed_work(xprtiod_workqueue, 2771 &transport->connect_worker, 2772 delay); 2773 } 2774 2775 static void xs_wake_disconnect(struct sock_xprt *transport) 2776 { 2777 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state)) 2778 xs_tcp_force_close(&transport->xprt); 2779 } 2780 2781 static void xs_wake_write(struct sock_xprt *transport) 2782 { 2783 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state)) 2784 xprt_write_space(&transport->xprt); 2785 } 2786 2787 static void xs_wake_error(struct sock_xprt *transport) 2788 { 2789 int sockerr; 2790 2791 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) 2792 return; 2793 mutex_lock(&transport->recv_mutex); 2794 if (transport->sock == NULL) 2795 goto out; 2796 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) 2797 goto out; 2798 sockerr = xchg(&transport->xprt_err, 0); 2799 if (sockerr < 0) 2800 xprt_wake_pending_tasks(&transport->xprt, sockerr); 2801 out: 2802 mutex_unlock(&transport->recv_mutex); 2803 } 2804 2805 static void xs_wake_pending(struct sock_xprt *transport) 2806 { 2807 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state)) 2808 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN); 2809 } 2810 2811 static void xs_error_handle(struct work_struct *work) 2812 { 2813 struct sock_xprt *transport = container_of(work, 2814 struct sock_xprt, error_worker); 2815 2816 xs_wake_disconnect(transport); 2817 xs_wake_write(transport); 2818 xs_wake_error(transport); 2819 xs_wake_pending(transport); 2820 } 2821 2822 /** 2823 * xs_local_print_stats - display AF_LOCAL socket-specific stats 2824 * @xprt: rpc_xprt struct containing statistics 2825 * @seq: output file 2826 * 2827 */ 2828 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2829 { 2830 long idle_time = 0; 2831 2832 if (xprt_connected(xprt)) 2833 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2834 2835 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2836 "%llu %llu %lu %llu %llu\n", 2837 xprt->stat.bind_count, 2838 xprt->stat.connect_count, 2839 xprt->stat.connect_time / HZ, 2840 idle_time, 2841 
xprt->stat.sends,
2842 xprt->stat.recvs,
2843 xprt->stat.bad_xids,
2844 xprt->stat.req_u,
2845 xprt->stat.bklog_u,
2846 xprt->stat.max_slots,
2847 xprt->stat.sending_u,
2848 xprt->stat.pending_u);
2849 }
2850
2851 /**
2852 * xs_udp_print_stats - display UDP socket-specific stats
2853 * @xprt: rpc_xprt struct containing statistics
2854 * @seq: output file
2855 *
2856 */
2857 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2858 {
2859 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2860
2861 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2862 "%lu %llu %llu\n",
2863 transport->srcport,
2864 xprt->stat.bind_count,
2865 xprt->stat.sends,
2866 xprt->stat.recvs,
2867 xprt->stat.bad_xids,
2868 xprt->stat.req_u,
2869 xprt->stat.bklog_u,
2870 xprt->stat.max_slots,
2871 xprt->stat.sending_u,
2872 xprt->stat.pending_u);
2873 }
2874
2875 /**
2876 * xs_tcp_print_stats - display TCP socket-specific stats
2877 * @xprt: rpc_xprt struct containing statistics
2878 * @seq: output file
2879 *
2880 */
2881 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2882 {
2883 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2884 long idle_time = 0;
2885
2886 if (xprt_connected(xprt))
2887 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2888
2889 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2890 "%llu %llu %lu %llu %llu\n",
2891 transport->srcport,
2892 xprt->stat.bind_count,
2893 xprt->stat.connect_count,
2894 xprt->stat.connect_time / HZ,
2895 idle_time,
2896 xprt->stat.sends,
2897 xprt->stat.recvs,
2898 xprt->stat.bad_xids,
2899 xprt->stat.req_u,
2900 xprt->stat.bklog_u,
2901 xprt->stat.max_slots,
2902 xprt->stat.sending_u,
2903 xprt->stat.pending_u);
2904 }
2905
2906 /*
2907 * Allocate a bunch of pages for a scratch buffer for the rpc code. We
2908 * allocate pages instead of using kmalloc (as rpc_malloc does) because we
2909 * want to use the server side send routines.
2910 */ 2911 static int bc_malloc(struct rpc_task *task) 2912 { 2913 struct rpc_rqst *rqst = task->tk_rqstp; 2914 size_t size = rqst->rq_callsize; 2915 struct page *page; 2916 struct rpc_buffer *buf; 2917 2918 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { 2919 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n", 2920 size); 2921 return -EINVAL; 2922 } 2923 2924 page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 2925 if (!page) 2926 return -ENOMEM; 2927 2928 buf = page_address(page); 2929 buf->len = PAGE_SIZE; 2930 2931 rqst->rq_buffer = buf->data; 2932 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; 2933 return 0; 2934 } 2935 2936 /* 2937 * Free the space allocated in the bc_alloc routine 2938 */ 2939 static void bc_free(struct rpc_task *task) 2940 { 2941 void *buffer = task->tk_rqstp->rq_buffer; 2942 struct rpc_buffer *buf; 2943 2944 buf = container_of(buffer, struct rpc_buffer, data); 2945 free_page((unsigned long)buf); 2946 } 2947 2948 static int bc_sendto(struct rpc_rqst *req) 2949 { 2950 struct xdr_buf *xdr = &req->rq_snd_buf; 2951 struct sock_xprt *transport = 2952 container_of(req->rq_xprt, struct sock_xprt, xprt); 2953 struct msghdr msg = { 2954 .msg_flags = 0, 2955 }; 2956 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | 2957 (u32)xdr->len); 2958 unsigned int sent = 0; 2959 int err; 2960 2961 req->rq_xtime = ktime_get(); 2962 err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask()); 2963 if (err < 0) 2964 return err; 2965 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent); 2966 xdr_free_bvec(xdr); 2967 if (err < 0 || sent != (xdr->len + sizeof(marker))) 2968 return -EAGAIN; 2969 return sent; 2970 } 2971 2972 /** 2973 * bc_send_request - Send a backchannel Call on a TCP socket 2974 * @req: rpc_rqst containing Call message to be sent 2975 * 2976 * xpt_mutex ensures @rqstp's whole message is written to the socket 2977 * without interruption. 2978 * 2979 * Return values: 2980 * %0 if the message was sent successfully 2981 * %ENOTCONN if the message was not sent 2982 */ 2983 static int bc_send_request(struct rpc_rqst *req) 2984 { 2985 struct svc_xprt *xprt; 2986 int len; 2987 2988 /* 2989 * Get the server socket associated with this callback xprt 2990 */ 2991 xprt = req->rq_xprt->bc_xprt; 2992 2993 /* 2994 * Grab the mutex to serialize data as the connection is shared 2995 * with the fore channel 2996 */ 2997 mutex_lock(&xprt->xpt_mutex); 2998 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 2999 len = -ENOTCONN; 3000 else 3001 len = bc_sendto(req); 3002 mutex_unlock(&xprt->xpt_mutex); 3003 3004 if (len > 0) 3005 len = 0; 3006 3007 return len; 3008 } 3009 3010 /* 3011 * The close routine. Since this is client initiated, we do nothing 3012 */ 3013 3014 static void bc_close(struct rpc_xprt *xprt) 3015 { 3016 xprt_disconnect_done(xprt); 3017 } 3018 3019 /* 3020 * The xprt destroy routine. 
Again, because this connection is client 3021 * initiated, we do nothing 3022 */ 3023 3024 static void bc_destroy(struct rpc_xprt *xprt) 3025 { 3026 dprintk("RPC: bc_destroy xprt %p\n", xprt); 3027 3028 xs_xprt_free(xprt); 3029 module_put(THIS_MODULE); 3030 } 3031 3032 static const struct rpc_xprt_ops xs_local_ops = { 3033 .reserve_xprt = xprt_reserve_xprt, 3034 .release_xprt = xprt_release_xprt, 3035 .alloc_slot = xprt_alloc_slot, 3036 .free_slot = xprt_free_slot, 3037 .rpcbind = xs_local_rpcbind, 3038 .set_port = xs_local_set_port, 3039 .connect = xs_local_connect, 3040 .buf_alloc = rpc_malloc, 3041 .buf_free = rpc_free, 3042 .prepare_request = xs_stream_prepare_request, 3043 .send_request = xs_local_send_request, 3044 .wait_for_reply_request = xprt_wait_for_reply_request_def, 3045 .close = xs_close, 3046 .destroy = xs_destroy, 3047 .print_stats = xs_local_print_stats, 3048 .enable_swap = xs_enable_swap, 3049 .disable_swap = xs_disable_swap, 3050 }; 3051 3052 static const struct rpc_xprt_ops xs_udp_ops = { 3053 .set_buffer_size = xs_udp_set_buffer_size, 3054 .reserve_xprt = xprt_reserve_xprt_cong, 3055 .release_xprt = xprt_release_xprt_cong, 3056 .alloc_slot = xprt_alloc_slot, 3057 .free_slot = xprt_free_slot, 3058 .rpcbind = rpcb_getport_async, 3059 .set_port = xs_set_port, 3060 .connect = xs_connect, 3061 .get_srcaddr = xs_sock_srcaddr, 3062 .get_srcport = xs_sock_srcport, 3063 .buf_alloc = rpc_malloc, 3064 .buf_free = rpc_free, 3065 .send_request = xs_udp_send_request, 3066 .wait_for_reply_request = xprt_wait_for_reply_request_rtt, 3067 .timer = xs_udp_timer, 3068 .release_request = xprt_release_rqst_cong, 3069 .close = xs_close, 3070 .destroy = xs_destroy, 3071 .print_stats = xs_udp_print_stats, 3072 .enable_swap = xs_enable_swap, 3073 .disable_swap = xs_disable_swap, 3074 .inject_disconnect = xs_inject_disconnect, 3075 }; 3076 3077 static const struct rpc_xprt_ops xs_tcp_ops = { 3078 .reserve_xprt = xprt_reserve_xprt, 3079 .release_xprt = xprt_release_xprt, 3080 .alloc_slot = xprt_alloc_slot, 3081 .free_slot = xprt_free_slot, 3082 .rpcbind = rpcb_getport_async, 3083 .set_port = xs_set_port, 3084 .connect = xs_connect, 3085 .get_srcaddr = xs_sock_srcaddr, 3086 .get_srcport = xs_sock_srcport, 3087 .buf_alloc = rpc_malloc, 3088 .buf_free = rpc_free, 3089 .prepare_request = xs_stream_prepare_request, 3090 .send_request = xs_tcp_send_request, 3091 .wait_for_reply_request = xprt_wait_for_reply_request_def, 3092 .close = xs_tcp_shutdown, 3093 .destroy = xs_destroy, 3094 .set_connect_timeout = xs_tcp_set_connect_timeout, 3095 .print_stats = xs_tcp_print_stats, 3096 .enable_swap = xs_enable_swap, 3097 .disable_swap = xs_disable_swap, 3098 .inject_disconnect = xs_inject_disconnect, 3099 #ifdef CONFIG_SUNRPC_BACKCHANNEL 3100 .bc_setup = xprt_setup_bc, 3101 .bc_maxpayload = xs_tcp_bc_maxpayload, 3102 .bc_num_slots = xprt_bc_max_slots, 3103 .bc_free_rqst = xprt_free_bc_rqst, 3104 .bc_destroy = xprt_destroy_bc, 3105 #endif 3106 }; 3107 3108 /* 3109 * The rpc_xprt_ops for the server backchannel 3110 */ 3111 3112 static const struct rpc_xprt_ops bc_tcp_ops = { 3113 .reserve_xprt = xprt_reserve_xprt, 3114 .release_xprt = xprt_release_xprt, 3115 .alloc_slot = xprt_alloc_slot, 3116 .free_slot = xprt_free_slot, 3117 .buf_alloc = bc_malloc, 3118 .buf_free = bc_free, 3119 .send_request = bc_send_request, 3120 .wait_for_reply_request = xprt_wait_for_reply_request_def, 3121 .close = bc_close, 3122 .destroy = bc_destroy, 3123 .print_stats = xs_tcp_print_stats, 3124 .enable_swap = xs_enable_swap, 3125 
.disable_swap = xs_disable_swap, 3126 .inject_disconnect = xs_inject_disconnect, 3127 }; 3128 3129 static int xs_init_anyaddr(const int family, struct sockaddr *sap) 3130 { 3131 static const struct sockaddr_in sin = { 3132 .sin_family = AF_INET, 3133 .sin_addr.s_addr = htonl(INADDR_ANY), 3134 }; 3135 static const struct sockaddr_in6 sin6 = { 3136 .sin6_family = AF_INET6, 3137 .sin6_addr = IN6ADDR_ANY_INIT, 3138 }; 3139 3140 switch (family) { 3141 case AF_LOCAL: 3142 break; 3143 case AF_INET: 3144 memcpy(sap, &sin, sizeof(sin)); 3145 break; 3146 case AF_INET6: 3147 memcpy(sap, &sin6, sizeof(sin6)); 3148 break; 3149 default: 3150 dprintk("RPC: %s: Bad address family\n", __func__); 3151 return -EAFNOSUPPORT; 3152 } 3153 return 0; 3154 } 3155 3156 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, 3157 unsigned int slot_table_size, 3158 unsigned int max_slot_table_size) 3159 { 3160 struct rpc_xprt *xprt; 3161 struct sock_xprt *new; 3162 3163 if (args->addrlen > sizeof(xprt->addr)) { 3164 dprintk("RPC: xs_setup_xprt: address too large\n"); 3165 return ERR_PTR(-EBADF); 3166 } 3167 3168 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, 3169 max_slot_table_size); 3170 if (xprt == NULL) { 3171 dprintk("RPC: xs_setup_xprt: couldn't allocate " 3172 "rpc_xprt\n"); 3173 return ERR_PTR(-ENOMEM); 3174 } 3175 3176 new = container_of(xprt, struct sock_xprt, xprt); 3177 mutex_init(&new->recv_mutex); 3178 memcpy(&xprt->addr, args->dstaddr, args->addrlen); 3179 xprt->addrlen = args->addrlen; 3180 if (args->srcaddr) 3181 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); 3182 else { 3183 int err; 3184 err = xs_init_anyaddr(args->dstaddr->sa_family, 3185 (struct sockaddr *)&new->srcaddr); 3186 if (err != 0) { 3187 xprt_free(xprt); 3188 return ERR_PTR(err); 3189 } 3190 } 3191 3192 return xprt; 3193 } 3194 3195 static const struct rpc_timeout xs_local_default_timeout = { 3196 .to_initval = 10 * HZ, 3197 .to_maxval = 10 * HZ, 3198 .to_retries = 2, 3199 }; 3200 3201 /** 3202 * xs_setup_local - Set up transport to use an AF_LOCAL socket 3203 * @args: rpc transport creation arguments 3204 * 3205 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP 3206 */ 3207 static struct rpc_xprt *xs_setup_local(struct xprt_create *args) 3208 { 3209 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; 3210 struct sock_xprt *transport; 3211 struct rpc_xprt *xprt; 3212 struct rpc_xprt *ret; 3213 3214 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3215 xprt_max_tcp_slot_table_entries); 3216 if (IS_ERR(xprt)) 3217 return xprt; 3218 transport = container_of(xprt, struct sock_xprt, xprt); 3219 3220 xprt->prot = 0; 3221 xprt->xprt_class = &xs_local_transport; 3222 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3223 3224 xprt->bind_timeout = XS_BIND_TO; 3225 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 3226 xprt->idle_timeout = XS_IDLE_DISC_TO; 3227 3228 xprt->ops = &xs_local_ops; 3229 xprt->timeout = &xs_local_default_timeout; 3230 3231 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3232 INIT_WORK(&transport->error_worker, xs_error_handle); 3233 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket); 3234 3235 switch (sun->sun_family) { 3236 case AF_LOCAL: 3237 if (sun->sun_path[0] != '/' && sun->sun_path[0] != '\0') { 3238 dprintk("RPC: bad AF_LOCAL address: %s\n", 3239 sun->sun_path); 3240 ret = ERR_PTR(-EINVAL); 3241 goto out_err; 3242 } 3243 xprt_set_bound(xprt); 3244 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); 3245 break; 3246 
default: 3247 ret = ERR_PTR(-EAFNOSUPPORT); 3248 goto out_err; 3249 } 3250 3251 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", 3252 xprt->address_strings[RPC_DISPLAY_ADDR]); 3253 3254 if (try_module_get(THIS_MODULE)) 3255 return xprt; 3256 ret = ERR_PTR(-EINVAL); 3257 out_err: 3258 xs_xprt_free(xprt); 3259 return ret; 3260 } 3261 3262 static const struct rpc_timeout xs_udp_default_timeout = { 3263 .to_initval = 5 * HZ, 3264 .to_maxval = 30 * HZ, 3265 .to_increment = 5 * HZ, 3266 .to_retries = 5, 3267 }; 3268 3269 /** 3270 * xs_setup_udp - Set up transport to use a UDP socket 3271 * @args: rpc transport creation arguments 3272 * 3273 */ 3274 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) 3275 { 3276 struct sockaddr *addr = args->dstaddr; 3277 struct rpc_xprt *xprt; 3278 struct sock_xprt *transport; 3279 struct rpc_xprt *ret; 3280 3281 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, 3282 xprt_udp_slot_table_entries); 3283 if (IS_ERR(xprt)) 3284 return xprt; 3285 transport = container_of(xprt, struct sock_xprt, xprt); 3286 3287 xprt->prot = IPPROTO_UDP; 3288 xprt->xprt_class = &xs_udp_transport; 3289 /* XXX: header size can vary due to auth type, IPv6, etc. */ 3290 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 3291 3292 xprt->bind_timeout = XS_BIND_TO; 3293 xprt->reestablish_timeout = XS_UDP_REEST_TO; 3294 xprt->idle_timeout = XS_IDLE_DISC_TO; 3295 3296 xprt->ops = &xs_udp_ops; 3297 3298 xprt->timeout = &xs_udp_default_timeout; 3299 3300 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); 3301 INIT_WORK(&transport->error_worker, xs_error_handle); 3302 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); 3303 3304 switch (addr->sa_family) { 3305 case AF_INET: 3306 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 3307 xprt_set_bound(xprt); 3308 3309 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); 3310 break; 3311 case AF_INET6: 3312 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 3313 xprt_set_bound(xprt); 3314 3315 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 3316 break; 3317 default: 3318 ret = ERR_PTR(-EAFNOSUPPORT); 3319 goto out_err; 3320 } 3321 3322 if (xprt_bound(xprt)) 3323 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3324 xprt->address_strings[RPC_DISPLAY_ADDR], 3325 xprt->address_strings[RPC_DISPLAY_PORT], 3326 xprt->address_strings[RPC_DISPLAY_PROTO]); 3327 else 3328 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 3329 xprt->address_strings[RPC_DISPLAY_ADDR], 3330 xprt->address_strings[RPC_DISPLAY_PROTO]); 3331 3332 if (try_module_get(THIS_MODULE)) 3333 return xprt; 3334 ret = ERR_PTR(-EINVAL); 3335 out_err: 3336 xs_xprt_free(xprt); 3337 return ret; 3338 } 3339 3340 static const struct rpc_timeout xs_tcp_default_timeout = { 3341 .to_initval = 60 * HZ, 3342 .to_maxval = 60 * HZ, 3343 .to_retries = 2, 3344 }; 3345 3346 /** 3347 * xs_setup_tcp - Set up transport to use a TCP socket 3348 * @args: rpc transport creation arguments 3349 * 3350 */ 3351 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) 3352 { 3353 struct sockaddr *addr = args->dstaddr; 3354 struct rpc_xprt *xprt; 3355 struct sock_xprt *transport; 3356 struct rpc_xprt *ret; 3357 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 3358 3359 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 3360 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 3361 3362 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3363 max_slot_table_size); 3364 if (IS_ERR(xprt)) 3365 return xprt; 3366 transport = 
container_of(xprt, struct sock_xprt, xprt); 3367 3368 xprt->prot = IPPROTO_TCP; 3369 xprt->xprt_class = &xs_tcp_transport; 3370 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3371 3372 xprt->bind_timeout = XS_BIND_TO; 3373 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 3374 xprt->idle_timeout = XS_IDLE_DISC_TO; 3375 3376 xprt->ops = &xs_tcp_ops; 3377 xprt->timeout = &xs_tcp_default_timeout; 3378 3379 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 3380 if (args->reconnect_timeout) 3381 xprt->max_reconnect_timeout = args->reconnect_timeout; 3382 3383 xprt->connect_timeout = xprt->timeout->to_initval * 3384 (xprt->timeout->to_retries + 1); 3385 if (args->connect_timeout) 3386 xs_tcp_do_set_connect_timeout(xprt, args->connect_timeout); 3387 3388 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3389 INIT_WORK(&transport->error_worker, xs_error_handle); 3390 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 3391 3392 switch (addr->sa_family) { 3393 case AF_INET: 3394 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 3395 xprt_set_bound(xprt); 3396 3397 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 3398 break; 3399 case AF_INET6: 3400 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 3401 xprt_set_bound(xprt); 3402 3403 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 3404 break; 3405 default: 3406 ret = ERR_PTR(-EAFNOSUPPORT); 3407 goto out_err; 3408 } 3409 3410 if (xprt_bound(xprt)) 3411 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3412 xprt->address_strings[RPC_DISPLAY_ADDR], 3413 xprt->address_strings[RPC_DISPLAY_PORT], 3414 xprt->address_strings[RPC_DISPLAY_PROTO]); 3415 else 3416 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 3417 xprt->address_strings[RPC_DISPLAY_ADDR], 3418 xprt->address_strings[RPC_DISPLAY_PROTO]); 3419 3420 if (try_module_get(THIS_MODULE)) 3421 return xprt; 3422 ret = ERR_PTR(-EINVAL); 3423 out_err: 3424 xs_xprt_free(xprt); 3425 return ret; 3426 } 3427 3428 /** 3429 * xs_setup_tcp_tls - Set up transport to use a TCP with TLS 3430 * @args: rpc transport creation arguments 3431 * 3432 */ 3433 static struct rpc_xprt *xs_setup_tcp_tls(struct xprt_create *args) 3434 { 3435 struct sockaddr *addr = args->dstaddr; 3436 struct rpc_xprt *xprt; 3437 struct sock_xprt *transport; 3438 struct rpc_xprt *ret; 3439 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 3440 3441 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 3442 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 3443 3444 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3445 max_slot_table_size); 3446 if (IS_ERR(xprt)) 3447 return xprt; 3448 transport = container_of(xprt, struct sock_xprt, xprt); 3449 3450 xprt->prot = IPPROTO_TCP; 3451 xprt->xprt_class = &xs_tcp_transport; 3452 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3453 3454 xprt->bind_timeout = XS_BIND_TO; 3455 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 3456 xprt->idle_timeout = XS_IDLE_DISC_TO; 3457 3458 xprt->ops = &xs_tcp_ops; 3459 xprt->timeout = &xs_tcp_default_timeout; 3460 3461 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 3462 xprt->connect_timeout = xprt->timeout->to_initval * 3463 (xprt->timeout->to_retries + 1); 3464 3465 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3466 INIT_WORK(&transport->error_worker, xs_error_handle); 3467 3468 switch (args->xprtsec.policy) { 3469 case RPC_XPRTSEC_TLS_ANON: 3470 case RPC_XPRTSEC_TLS_X509: 3471 xprt->xprtsec = args->xprtsec; 3472 
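/* TLS-protected transports use a dedicated connect worker that sets up
 * the TCP connection, sends the RPC_AUTH_TLS probe, and then drives the
 * TLS handshake (see xs_tcp_tls_setup_socket above). */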
INIT_DELAYED_WORK(&transport->connect_worker, 3473 xs_tcp_tls_setup_socket); 3474 break; 3475 default: 3476 ret = ERR_PTR(-EACCES); 3477 goto out_err; 3478 } 3479 3480 switch (addr->sa_family) { 3481 case AF_INET: 3482 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 3483 xprt_set_bound(xprt); 3484 3485 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 3486 break; 3487 case AF_INET6: 3488 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 3489 xprt_set_bound(xprt); 3490 3491 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 3492 break; 3493 default: 3494 ret = ERR_PTR(-EAFNOSUPPORT); 3495 goto out_err; 3496 } 3497 3498 if (xprt_bound(xprt)) 3499 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3500 xprt->address_strings[RPC_DISPLAY_ADDR], 3501 xprt->address_strings[RPC_DISPLAY_PORT], 3502 xprt->address_strings[RPC_DISPLAY_PROTO]); 3503 else 3504 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 3505 xprt->address_strings[RPC_DISPLAY_ADDR], 3506 xprt->address_strings[RPC_DISPLAY_PROTO]); 3507 3508 if (try_module_get(THIS_MODULE)) 3509 return xprt; 3510 ret = ERR_PTR(-EINVAL); 3511 out_err: 3512 xs_xprt_free(xprt); 3513 return ret; 3514 } 3515 3516 /** 3517 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket 3518 * @args: rpc transport creation arguments 3519 * 3520 */ 3521 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) 3522 { 3523 struct sockaddr *addr = args->dstaddr; 3524 struct rpc_xprt *xprt; 3525 struct sock_xprt *transport; 3526 struct svc_sock *bc_sock; 3527 struct rpc_xprt *ret; 3528 3529 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3530 xprt_tcp_slot_table_entries); 3531 if (IS_ERR(xprt)) 3532 return xprt; 3533 transport = container_of(xprt, struct sock_xprt, xprt); 3534 3535 xprt->prot = IPPROTO_TCP; 3536 xprt->xprt_class = &xs_bc_tcp_transport; 3537 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3538 xprt->timeout = &xs_tcp_default_timeout; 3539 3540 /* backchannel */ 3541 xprt_set_bound(xprt); 3542 xprt->bind_timeout = 0; 3543 xprt->reestablish_timeout = 0; 3544 xprt->idle_timeout = 0; 3545 3546 xprt->ops = &bc_tcp_ops; 3547 3548 switch (addr->sa_family) { 3549 case AF_INET: 3550 xs_format_peer_addresses(xprt, "tcp", 3551 RPCBIND_NETID_TCP); 3552 break; 3553 case AF_INET6: 3554 xs_format_peer_addresses(xprt, "tcp", 3555 RPCBIND_NETID_TCP6); 3556 break; 3557 default: 3558 ret = ERR_PTR(-EAFNOSUPPORT); 3559 goto out_err; 3560 } 3561 3562 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3563 xprt->address_strings[RPC_DISPLAY_ADDR], 3564 xprt->address_strings[RPC_DISPLAY_PORT], 3565 xprt->address_strings[RPC_DISPLAY_PROTO]); 3566 3567 /* 3568 * Once we've associated a backchannel xprt with a connection, 3569 * we want to keep it around as long as the connection lasts, 3570 * in case we need to start using it for a backchannel again; 3571 * this reference won't be dropped until bc_xprt is destroyed. 
3572 */ 3573 xprt_get(xprt); 3574 args->bc_xprt->xpt_bc_xprt = xprt; 3575 xprt->bc_xprt = args->bc_xprt; 3576 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); 3577 transport->sock = bc_sock->sk_sock; 3578 transport->inet = bc_sock->sk_sk; 3579 3580 /* 3581 * Since we don't want connections for the backchannel, we set 3582 * the xprt status to connected 3583 */ 3584 xprt_set_connected(xprt); 3585 3586 if (try_module_get(THIS_MODULE)) 3587 return xprt; 3588 3589 args->bc_xprt->xpt_bc_xprt = NULL; 3590 args->bc_xprt->xpt_bc_xps = NULL; 3591 xprt_put(xprt); 3592 ret = ERR_PTR(-EINVAL); 3593 out_err: 3594 xs_xprt_free(xprt); 3595 return ret; 3596 } 3597 3598 static struct xprt_class xs_local_transport = { 3599 .list = LIST_HEAD_INIT(xs_local_transport.list), 3600 .name = "named UNIX socket", 3601 .owner = THIS_MODULE, 3602 .ident = XPRT_TRANSPORT_LOCAL, 3603 .setup = xs_setup_local, 3604 .netid = { "" }, 3605 }; 3606 3607 static struct xprt_class xs_udp_transport = { 3608 .list = LIST_HEAD_INIT(xs_udp_transport.list), 3609 .name = "udp", 3610 .owner = THIS_MODULE, 3611 .ident = XPRT_TRANSPORT_UDP, 3612 .setup = xs_setup_udp, 3613 .netid = { "udp", "udp6", "" }, 3614 }; 3615 3616 static struct xprt_class xs_tcp_transport = { 3617 .list = LIST_HEAD_INIT(xs_tcp_transport.list), 3618 .name = "tcp", 3619 .owner = THIS_MODULE, 3620 .ident = XPRT_TRANSPORT_TCP, 3621 .setup = xs_setup_tcp, 3622 .netid = { "tcp", "tcp6", "" }, 3623 }; 3624 3625 static struct xprt_class xs_tcp_tls_transport = { 3626 .list = LIST_HEAD_INIT(xs_tcp_tls_transport.list), 3627 .name = "tcp-with-tls", 3628 .owner = THIS_MODULE, 3629 .ident = XPRT_TRANSPORT_TCP_TLS, 3630 .setup = xs_setup_tcp_tls, 3631 .netid = { "tcp", "tcp6", "" }, 3632 }; 3633 3634 static struct xprt_class xs_bc_tcp_transport = { 3635 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), 3636 .name = "tcp NFSv4.1 backchannel", 3637 .owner = THIS_MODULE, 3638 .ident = XPRT_TRANSPORT_BC_TCP, 3639 .setup = xs_setup_bc_tcp, 3640 .netid = { "" }, 3641 }; 3642 3643 /** 3644 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client 3645 * 3646 */ 3647 int init_socket_xprt(void) 3648 { 3649 if (!sunrpc_table_header) 3650 sunrpc_table_header = register_sysctl("sunrpc", xs_tunables_table); 3651 3652 xprt_register_transport(&xs_local_transport); 3653 xprt_register_transport(&xs_udp_transport); 3654 xprt_register_transport(&xs_tcp_transport); 3655 xprt_register_transport(&xs_tcp_tls_transport); 3656 xprt_register_transport(&xs_bc_tcp_transport); 3657 3658 return 0; 3659 } 3660 3661 /** 3662 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister 3663 * 3664 */ 3665 void cleanup_socket_xprt(void) 3666 { 3667 if (sunrpc_table_header) { 3668 unregister_sysctl_table(sunrpc_table_header); 3669 sunrpc_table_header = NULL; 3670 } 3671 3672 xprt_unregister_transport(&xs_local_transport); 3673 xprt_unregister_transport(&xs_udp_transport); 3674 xprt_unregister_transport(&xs_tcp_transport); 3675 xprt_unregister_transport(&xs_tcp_tls_transport); 3676 xprt_unregister_transport(&xs_bc_tcp_transport); 3677 } 3678 3679 static int param_set_portnr(const char *val, const struct kernel_param *kp) 3680 { 3681 return param_set_uint_minmax(val, kp, 3682 RPC_MIN_RESVPORT, 3683 RPC_MAX_RESVPORT); 3684 } 3685 3686 static const struct kernel_param_ops param_ops_portnr = { 3687 .set = param_set_portnr, 3688 .get = param_get_uint, 3689 }; 3690 3691 #define param_check_portnr(name, p) \ 3692 __param_check(name, p, unsigned int); 3693 3694 
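/* Module parameters mirroring the min_resvport/max_resvport sysctls;
 * param_set_portnr() above clamps their values to the
 * RPC_MIN_RESVPORT..RPC_MAX_RESVPORT range. */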
module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); 3695 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644); 3696 3697 static int param_set_slot_table_size(const char *val, 3698 const struct kernel_param *kp) 3699 { 3700 return param_set_uint_minmax(val, kp, 3701 RPC_MIN_SLOT_TABLE, 3702 RPC_MAX_SLOT_TABLE); 3703 } 3704 3705 static const struct kernel_param_ops param_ops_slot_table_size = { 3706 .set = param_set_slot_table_size, 3707 .get = param_get_uint, 3708 }; 3709 3710 #define param_check_slot_table_size(name, p) \ 3711 __param_check(name, p, unsigned int); 3712 3713 static int param_set_max_slot_table_size(const char *val, 3714 const struct kernel_param *kp) 3715 { 3716 return param_set_uint_minmax(val, kp, 3717 RPC_MIN_SLOT_TABLE, 3718 RPC_MAX_SLOT_TABLE_LIMIT); 3719 } 3720 3721 static const struct kernel_param_ops param_ops_max_slot_table_size = { 3722 .set = param_set_max_slot_table_size, 3723 .get = param_get_uint, 3724 }; 3725 3726 #define param_check_max_slot_table_size(name, p) \ 3727 __param_check(name, p, unsigned int); 3728 3729 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, 3730 slot_table_size, 0644); 3731 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, 3732 max_slot_table_size, 0644); 3733 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 3734 slot_table_size, 0644); 3735
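/* The slot table sizes above can likewise be set at module load time;
 * the same bounds are enforced as for the corresponding sysctls. */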