/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * xprtsock tunables
 */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static ctl_table xs_tunables_table[] = {
	{
		.ctl_name	= CTL_SLOTTABLE_UDP,
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.ctl_name	= CTL_SLOTTABLE_TCP,
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.ctl_name	= CTL_MIN_RESVPORT,
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.ctl_name	= CTL_MAX_RESVPORT,
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.ctl_name = 0,
	},
};

static ctl_table sunrpc_table[] = {
	{
		.ctl_name	= CTL_SUNRPC,
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{
		.ctl_name = 0,
	},
};

#endif
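/*
 * With sunrpc_table registered, the tunables above appear as
 * /proc/sys/sunrpc/udp_slot_table_entries, tcp_slot_table_entries,
 * min_resvport and max_resvport; proc_dointvec_minmax() clamps any
 * value written there to the min/max limits defined above.
 */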
/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY	(10U)

/*
 * Time out for an RPC UDP socket connect.  UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
 */
#define XS_UDP_CONN_TO		(5U * HZ)

/*
 * Wait duration for an RPC TCP connection to be established.  Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
 */
#define XS_TCP_CONN_TO		(60U * HZ)

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

struct sock_xprt {
	struct rpc_xprt		xprt;

	/*
	 * Network layer
	 */
	struct socket *		sock;
	struct sock *		inet;

	/*
	 * State of TCP reply receive
	 */
	__be32			tcp_fraghdr,
				tcp_xid;

	u32			tcp_offset,
				tcp_reclen;

	unsigned long		tcp_copied,
				tcp_flags;

	/*
	 * Connection of transports
	 */
	struct delayed_work	connect_worker;
	struct sockaddr_storage	addr;
	unsigned short		port;

	/*
	 * UDP socket buffer size parameters
	 */
	size_t			rcvsize,
				sndsize;

	/*
	 * Saved socket callback addresses
	 */
	void			(*old_data_ready)(struct sock *, int);
	void			(*old_state_change)(struct sock *);
	void			(*old_write_space)(struct sock *);
};

/*
 * TCP receive state flags
 */
#define TCP_RCV_LAST_FRAG	(1UL << 0)
#define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
#define TCP_RCV_COPY_XID	(1UL << 2)
#define TCP_RCV_COPY_DATA	(1UL << 3)
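/*
 * The peer address lives in the generic rpc_xprt; these helpers
 * return it cast to the sockaddr flavour a caller needs.
 */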
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr_in *addr = xs_addr_in(xprt);
	char *buf;

	buf = kzalloc(20, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 20, NIPQUAD_FMT,
				NIPQUAD(addr->sin_addr.s_addr));
	}
	xprt->address_strings[RPC_DISPLAY_ADDR] = buf;

	buf = kzalloc(8, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 8, "%u",
				ntohs(addr->sin_port));
	}
	xprt->address_strings[RPC_DISPLAY_PORT] = buf;

	buf = kzalloc(8, GFP_KERNEL);
	if (buf) {
		if (xprt->prot == IPPROTO_UDP)
			snprintf(buf, 8, "udp");
		else
			snprintf(buf, 8, "tcp");
	}
	xprt->address_strings[RPC_DISPLAY_PROTO] = buf;

	buf = kzalloc(48, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 48, "addr="NIPQUAD_FMT" port=%u proto=%s",
				NIPQUAD(addr->sin_addr.s_addr),
				ntohs(addr->sin_port),
				xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	}
	xprt->address_strings[RPC_DISPLAY_ALL] = buf;

	buf = kzalloc(10, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 10, "%02x%02x%02x%02x",
				NIPQUAD(addr->sin_addr.s_addr));
	}
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;

	buf = kzalloc(8, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 8, "%4hx",
				ntohs(addr->sin_port));
	}
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
}

static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr_in6 *addr = xs_addr_in6(xprt);
	char *buf;

	buf = kzalloc(40, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 40, NIP6_FMT,
				NIP6(addr->sin6_addr));
	}
	xprt->address_strings[RPC_DISPLAY_ADDR] = buf;

	buf = kzalloc(8, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 8, "%u",
				ntohs(addr->sin6_port));
	}
	xprt->address_strings[RPC_DISPLAY_PORT] = buf;

	buf = kzalloc(8, GFP_KERNEL);
	if (buf) {
		if (xprt->prot == IPPROTO_UDP)
			snprintf(buf, 8, "udp");
		else
			snprintf(buf, 8, "tcp");
	}
	xprt->address_strings[RPC_DISPLAY_PROTO] = buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 64, "addr="NIP6_FMT" port=%u proto=%s",
				NIP6(addr->sin6_addr),
				ntohs(addr->sin6_port),
				xprt->prot == IPPROTO_UDP ? "udp" : "tcp");
	}
	xprt->address_strings[RPC_DISPLAY_ALL] = buf;

	buf = kzalloc(36, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 36, NIP6_SEQFMT,
				NIP6(addr->sin6_addr));
	}
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;

	buf = kzalloc(8, GFP_KERNEL);
	if (buf) {
		snprintf(buf, 8, "%4hx",
				ntohs(addr->sin6_port));
	}
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
}

static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		kfree(xprt->address_strings[i]);
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
{
	struct page **ppage;
	unsigned int remainder;
	int err, sent = 0;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	for(;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (remainder != 0 || more)
			flags |= MSG_MORE;
		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)
			break;
		sent += err;
		ppage++;
		base = 0;
	}
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	unsigned int remainder = xdr->len - base;
	int err, sent = 0;

	if (unlikely(!sock))
		return -ENOTCONN;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return sent;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
		/* Protect against races with write_space */
		spin_lock_bh(&xprt->transport_lock);

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
			xprt_wait_for_buffer_space(task);

		spin_unlock_bh(&xprt->transport_lock);
	} else
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	req->rq_xtime = jiffies;
	status = xs_sendpages(transport->sock,
			      xs_addr(xprt),
			      xprt->addrlen, xdr,
			      req->rq_bytes_sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (likely(status >= (int) req->rq_slen))
		return 0;

	/* Still some bytes left; set up for a retry later. */
	if (status > 0)
		status = -EAGAIN;

	switch (status) {
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED.
		 */
		break;
	case -EAGAIN:
		xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		break;
	}

	return status;
}
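/*
 * RPC over a stream transport uses record marking: every request is
 * preceded by a four-byte header whose high bit flags the last fragment
 * of the record and whose low 31 bits carry the fragment length.
 * xs_encode_tcp_record_marker() fills in that header at the front of
 * the send buffer's head iovec just before transmission.
 */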
static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;
	unsigned int retry = 0;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendpages(transport->sock,
					NULL, 0, xdr, req->rq_bytes_sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		task->tk_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		status = -EAGAIN;
		if (retry++ > XS_SENDMSG_RETRY)
			break;
	}

	switch (status) {
	case -EAGAIN:
		xs_nospace(task);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		status = -ENOTCONN;
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		xprt_disconnect(xprt);
		break;
	}

	return status;
}

/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
		return;
	if (task == NULL)
		goto out_release;
	req = task->tk_rqstp;
	if (req->rq_bytes_sent == 0)
		goto out_release;
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
		goto out_release;
	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
out_release:
	xprt_release_xprt(xprt, task);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;

	if (!sk)
		goto clear_close_wait;

	dprintk("RPC: xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
clear_close_wait:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	smp_mb__after_clear_bit();
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_rearming_delayed_work(&transport->connect_worker);

	xprt_disconnect(xprt);
	xs_close(xprt);
	xs_free_peer_addresses(xprt);
	kfree(xprt->slot);
	kfree(xprt);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid;
	__be32 *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

 out_unlock:
	spin_unlock(&xprt->transport_lock);
 dropit:
	skb_free_datagram(sk, skb);
 out:
	read_unlock(&sk->sk_callback_lock);
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	size_t len, used;
	char *p;

	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;

	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
	else
		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
	transport->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(transport->tcp_reclen < 4)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			transport->tcp_reclen);
}

static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
{
	if (transport->tcp_offset == transport->tcp_reclen) {
		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
		transport->tcp_offset = 0;
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
			transport->tcp_flags |= TCP_RCV_COPY_XID;
			transport->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
	transport->tcp_flags |= TCP_RCV_COPY_DATA;
	transport->tcp_copied = 4;
	dprintk("RPC: reading reply for XID %08x\n",
			ntohl(transport->tcp_xid));
	xs_tcp_check_fraghdr(transport);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
	if (!req) {
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC: XID %08x request not found!\n",
				ntohl(transport->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > transport->tcp_reclen - transport->tcp_offset) {
		struct xdr_skb_reader my_desc;

		len = transport->tcp_reclen - transport->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  &my_desc, xdr_skb_read_bits);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  desc, xdr_skb_read_bits);

	if (r > 0) {
		transport->tcp_copied += r;
		transport->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off TCP_RCV_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(transport->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, "
				"tcp_offset = %u, tcp_reclen = %u\n",
				xprt, transport->tcp_copied,
				transport->tcp_offset, transport->tcp_reclen);
		goto out;
	}

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(transport->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
			transport->tcp_offset, transport->tcp_reclen);

	if (transport->tcp_copied == req->rq_private_buf.buflen)
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	else if (transport->tcp_offset == transport->tcp_reclen) {
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}

out:
	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
	spin_unlock(&xprt->transport_lock);
	xs_tcp_check_fraghdr(transport);
}

static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len;

	len = transport->tcp_reclen - transport->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	transport->tcp_offset += len;
	dprintk("RPC: discarded %Zu bytes\n", len);
	xs_tcp_check_fraghdr(transport);
}
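/*
 * xs_tcp_data_recv() below drives a small state machine over the
 * helpers above: read the record fragment header, then the XID, then
 * copy the payload into the matching request's receive buffer, and
 * finally discard anything left over in the fragment.  The current
 * stage is tracked in transport->tcp_flags.
 */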
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
	};

	dprintk("RPC: xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
			xs_tcp_read_xid(transport, &desc);
			continue;
		}
		/* Read in the request data */
		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(transport, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	dprintk("RPC: xs_tcp_data_ready...\n");

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			struct sock_xprt *transport = container_of(xprt,
					struct sock_xprt, xprt);

			/* Reset TCP record info */
			transport->tcp_offset = 0;
			transport->tcp_reclen = 0;
			transport->tcp_copied = 0;
			transport->tcp_flags =
				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;

			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
			xprt_wake_pending_tasks(xprt, 0);
		}
		spin_unlock_bh(&xprt->transport_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	case TCP_CLOSE_WAIT:
		/* Try to schedule an autoclose RPC call */
		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
		if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
			queue_work(rpciod_workqueue, &xprt->task_cleanup);
	default:
		xprt_disconnect(xprt);
	}
 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

 out:
	read_unlock(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}
1183a246b010SChuck Lever 118443118c29SChuck Lever /** 1185470056c2SChuck Lever * xs_udp_set_buffer_size - set send and receive limits 118643118c29SChuck Lever * @xprt: generic transport 1187470056c2SChuck Lever * @sndsize: requested size of send buffer, in bytes 1188470056c2SChuck Lever * @rcvsize: requested size of receive buffer, in bytes 118943118c29SChuck Lever * 1190470056c2SChuck Lever * Set socket send and receive buffer size limits. 119143118c29SChuck Lever */ 1192470056c2SChuck Lever static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) 119343118c29SChuck Lever { 11947c6e066eSChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 11957c6e066eSChuck Lever 11967c6e066eSChuck Lever transport->sndsize = 0; 1197470056c2SChuck Lever if (sndsize) 11987c6e066eSChuck Lever transport->sndsize = sndsize + 1024; 11997c6e066eSChuck Lever transport->rcvsize = 0; 1200470056c2SChuck Lever if (rcvsize) 12017c6e066eSChuck Lever transport->rcvsize = rcvsize + 1024; 1202470056c2SChuck Lever 1203470056c2SChuck Lever xs_udp_do_set_buffer_size(xprt); 120443118c29SChuck Lever } 120543118c29SChuck Lever 120646c0ee8bSChuck Lever /** 120746c0ee8bSChuck Lever * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport 120846c0ee8bSChuck Lever * @task: task that timed out 120946c0ee8bSChuck Lever * 121046c0ee8bSChuck Lever * Adjust the congestion window after a retransmit timeout has occurred. 121146c0ee8bSChuck Lever */ 121246c0ee8bSChuck Lever static void xs_udp_timer(struct rpc_task *task) 121346c0ee8bSChuck Lever { 121446c0ee8bSChuck Lever xprt_adjust_cwnd(task, -ETIMEDOUT); 121546c0ee8bSChuck Lever } 121646c0ee8bSChuck Lever 1217b85d8806SChuck Lever static unsigned short xs_get_random_port(void) 1218b85d8806SChuck Lever { 1219b85d8806SChuck Lever unsigned short range = xprt_max_resvport - xprt_min_resvport; 1220b85d8806SChuck Lever unsigned short rand = (unsigned short) net_random() % range; 1221b85d8806SChuck Lever return rand + xprt_min_resvport; 1222b85d8806SChuck Lever } 1223b85d8806SChuck Lever 122492200412SChuck Lever /** 122592200412SChuck Lever * xs_set_port - reset the port number in the remote endpoint address 122692200412SChuck Lever * @xprt: generic transport 122792200412SChuck Lever * @port: new port number 122892200412SChuck Lever * 122992200412SChuck Lever */ 123092200412SChuck Lever static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) 123192200412SChuck Lever { 1232*95392c59SChuck Lever struct sockaddr *addr = xs_addr(xprt); 1233c4efcb1dSChuck Lever 123492200412SChuck Lever dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); 1235c4efcb1dSChuck Lever 123620612005SChuck Lever switch (addr->sa_family) { 123720612005SChuck Lever case AF_INET: 123820612005SChuck Lever ((struct sockaddr_in *)addr)->sin_port = htons(port); 123920612005SChuck Lever break; 124020612005SChuck Lever case AF_INET6: 124120612005SChuck Lever ((struct sockaddr_in6 *)addr)->sin6_port = htons(port); 124220612005SChuck Lever break; 124320612005SChuck Lever default: 124420612005SChuck Lever BUG(); 124520612005SChuck Lever } 124692200412SChuck Lever } 124792200412SChuck Lever 12487dc753f0SChuck Lever static int xs_bind4(struct sock_xprt *transport, struct socket *sock) 1249a246b010SChuck Lever { 1250a246b010SChuck Lever struct sockaddr_in myaddr = { 1251a246b010SChuck Lever .sin_family = AF_INET, 1252a246b010SChuck Lever }; 1253d3bc9a1dSFrank van Maarseveen struct sockaddr_in *sa; 1254529b33c6SChuck Lever int err; 
1255c8475461SChuck Lever unsigned short port = transport->port; 1256a246b010SChuck Lever 1257d3bc9a1dSFrank van Maarseveen if (!transport->xprt.resvport) 1258d3bc9a1dSFrank van Maarseveen port = 0; 1259d3bc9a1dSFrank van Maarseveen sa = (struct sockaddr_in *)&transport->addr; 1260d3bc9a1dSFrank van Maarseveen myaddr.sin_addr = sa->sin_addr; 1261a246b010SChuck Lever do { 1262a246b010SChuck Lever myaddr.sin_port = htons(port); 1263e6242e92SSridhar Samudrala err = kernel_bind(sock, (struct sockaddr *) &myaddr, 1264a246b010SChuck Lever sizeof(myaddr)); 1265d3bc9a1dSFrank van Maarseveen if (!transport->xprt.resvport) 1266d3bc9a1dSFrank van Maarseveen break; 1267a246b010SChuck Lever if (err == 0) { 1268c8475461SChuck Lever transport->port = port; 1269d3bc9a1dSFrank van Maarseveen break; 1270a246b010SChuck Lever } 1271529b33c6SChuck Lever if (port <= xprt_min_resvport) 1272529b33c6SChuck Lever port = xprt_max_resvport; 1273529b33c6SChuck Lever else 1274529b33c6SChuck Lever port--; 1275c8475461SChuck Lever } while (err == -EADDRINUSE && port != transport->port); 12767dc753f0SChuck Lever dprintk("RPC: %s "NIPQUAD_FMT":%u: %s (%d)\n", 12777dc753f0SChuck Lever __FUNCTION__, NIPQUAD(myaddr.sin_addr), 12787dc753f0SChuck Lever port, err ? "failed" : "ok", err); 1279a246b010SChuck Lever return err; 1280a246b010SChuck Lever } 1281a246b010SChuck Lever 128290058d37SChuck Lever static int xs_bind6(struct sock_xprt *transport, struct socket *sock) 128390058d37SChuck Lever { 128490058d37SChuck Lever struct sockaddr_in6 myaddr = { 128590058d37SChuck Lever .sin6_family = AF_INET6, 128690058d37SChuck Lever }; 128790058d37SChuck Lever struct sockaddr_in6 *sa; 128890058d37SChuck Lever int err; 128990058d37SChuck Lever unsigned short port = transport->port; 129090058d37SChuck Lever 129190058d37SChuck Lever if (!transport->xprt.resvport) 129290058d37SChuck Lever port = 0; 129390058d37SChuck Lever sa = (struct sockaddr_in6 *)&transport->addr; 129490058d37SChuck Lever myaddr.sin6_addr = sa->sin6_addr; 129590058d37SChuck Lever do { 129690058d37SChuck Lever myaddr.sin6_port = htons(port); 129790058d37SChuck Lever err = kernel_bind(sock, (struct sockaddr *) &myaddr, 129890058d37SChuck Lever sizeof(myaddr)); 129990058d37SChuck Lever if (!transport->xprt.resvport) 130090058d37SChuck Lever break; 130190058d37SChuck Lever if (err == 0) { 130290058d37SChuck Lever transport->port = port; 130390058d37SChuck Lever break; 130490058d37SChuck Lever } 130590058d37SChuck Lever if (port <= xprt_min_resvport) 130690058d37SChuck Lever port = xprt_max_resvport; 130790058d37SChuck Lever else 130890058d37SChuck Lever port--; 130990058d37SChuck Lever } while (err == -EADDRINUSE && port != transport->port); 131090058d37SChuck Lever dprintk("RPC: xs_bind6 "NIP6_FMT":%u: %s (%d)\n", 131190058d37SChuck Lever NIP6(myaddr.sin6_addr), port, err ? 
"failed" : "ok", err); 131290058d37SChuck Lever return err; 131390058d37SChuck Lever } 131490058d37SChuck Lever 1315ed07536eSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC 1316ed07536eSPeter Zijlstra static struct lock_class_key xs_key[2]; 1317ed07536eSPeter Zijlstra static struct lock_class_key xs_slock_key[2]; 1318ed07536eSPeter Zijlstra 1319ed07536eSPeter Zijlstra static inline void xs_reclassify_socket(struct socket *sock) 1320ed07536eSPeter Zijlstra { 1321ed07536eSPeter Zijlstra struct sock *sk = sock->sk; 1322ed07536eSPeter Zijlstra BUG_ON(sk->sk_lock.owner != NULL); 1323ed07536eSPeter Zijlstra switch (sk->sk_family) { 1324ed07536eSPeter Zijlstra case AF_INET: 1325ed07536eSPeter Zijlstra sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS", 1326ed07536eSPeter Zijlstra &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]); 1327ed07536eSPeter Zijlstra break; 1328ed07536eSPeter Zijlstra 1329ed07536eSPeter Zijlstra case AF_INET6: 1330ed07536eSPeter Zijlstra sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS", 1331ed07536eSPeter Zijlstra &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]); 1332ed07536eSPeter Zijlstra break; 1333ed07536eSPeter Zijlstra 1334ed07536eSPeter Zijlstra default: 1335ed07536eSPeter Zijlstra BUG(); 1336ed07536eSPeter Zijlstra } 1337ed07536eSPeter Zijlstra } 1338ed07536eSPeter Zijlstra #else 1339ed07536eSPeter Zijlstra static inline void xs_reclassify_socket(struct socket *sock) 1340ed07536eSPeter Zijlstra { 1341ed07536eSPeter Zijlstra } 1342ed07536eSPeter Zijlstra #endif 1343ed07536eSPeter Zijlstra 134416be2d20SChuck Lever static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 134516be2d20SChuck Lever { 134616be2d20SChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 134716be2d20SChuck Lever 134816be2d20SChuck Lever if (!transport->inet) { 134916be2d20SChuck Lever struct sock *sk = sock->sk; 135016be2d20SChuck Lever 135116be2d20SChuck Lever write_lock_bh(&sk->sk_callback_lock); 135216be2d20SChuck Lever 135316be2d20SChuck Lever sk->sk_user_data = xprt; 135416be2d20SChuck Lever transport->old_data_ready = sk->sk_data_ready; 135516be2d20SChuck Lever transport->old_state_change = sk->sk_state_change; 135616be2d20SChuck Lever transport->old_write_space = sk->sk_write_space; 135716be2d20SChuck Lever sk->sk_data_ready = xs_udp_data_ready; 135816be2d20SChuck Lever sk->sk_write_space = xs_udp_write_space; 135916be2d20SChuck Lever sk->sk_no_check = UDP_CSUM_NORCV; 136016be2d20SChuck Lever sk->sk_allocation = GFP_ATOMIC; 136116be2d20SChuck Lever 136216be2d20SChuck Lever xprt_set_connected(xprt); 136316be2d20SChuck Lever 136416be2d20SChuck Lever /* Reset to new socket */ 136516be2d20SChuck Lever transport->sock = sock; 136616be2d20SChuck Lever transport->inet = sk; 136716be2d20SChuck Lever 136816be2d20SChuck Lever write_unlock_bh(&sk->sk_callback_lock); 136916be2d20SChuck Lever } 137016be2d20SChuck Lever xs_udp_do_set_buffer_size(xprt); 137116be2d20SChuck Lever } 137216be2d20SChuck Lever 13739903cd1cSChuck Lever /** 13749c3d72deSChuck Lever * xs_udp_connect_worker4 - set up a UDP socket 137565f27f38SDavid Howells * @work: RPC transport to connect 13769903cd1cSChuck Lever * 13779903cd1cSChuck Lever * Invoked by a work queue tasklet. 
1378a246b010SChuck Lever */ 13799c3d72deSChuck Lever static void xs_udp_connect_worker4(struct work_struct *work) 1380a246b010SChuck Lever { 138134161db6STrond Myklebust struct sock_xprt *transport = 138234161db6STrond Myklebust container_of(work, struct sock_xprt, connect_worker.work); 1383c8475461SChuck Lever struct rpc_xprt *xprt = &transport->xprt; 1384ee0ac0c2SChuck Lever struct socket *sock = transport->sock; 1385b0d93ad5SChuck Lever int err, status = -EIO; 1386a246b010SChuck Lever 1387ec739ef0SChuck Lever if (xprt->shutdown || !xprt_bound(xprt)) 1388a246b010SChuck Lever goto out; 1389a246b010SChuck Lever 1390b0d93ad5SChuck Lever /* Start by resetting any existing state */ 13919903cd1cSChuck Lever xs_close(xprt); 1392b0d93ad5SChuck Lever 1393b0d93ad5SChuck Lever if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { 1394b0d93ad5SChuck Lever dprintk("RPC: can't create UDP transport socket (%d).\n", -err); 1395a246b010SChuck Lever goto out; 1396a246b010SChuck Lever } 1397ed07536eSPeter Zijlstra xs_reclassify_socket(sock); 1398a246b010SChuck Lever 13997dc753f0SChuck Lever if (xs_bind4(transport, sock)) { 1400b0d93ad5SChuck Lever sock_release(sock); 1401b0d93ad5SChuck Lever goto out; 1402b0d93ad5SChuck Lever } 1403b0d93ad5SChuck Lever 1404edb267a6SChuck Lever dprintk("RPC: worker connecting xprt %p to address: %s\n", 14057559c7a2SChuck Lever xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 1406edb267a6SChuck Lever 140716be2d20SChuck Lever xs_udp_finish_connecting(xprt, sock); 1408a246b010SChuck Lever status = 0; 1409b0d93ad5SChuck Lever out: 1410b0d93ad5SChuck Lever xprt_wake_pending_tasks(xprt, status); 1411b0d93ad5SChuck Lever xprt_clear_connecting(xprt); 1412b0d93ad5SChuck Lever } 1413b0d93ad5SChuck Lever 141468e220bdSChuck Lever /** 141568e220bdSChuck Lever * xs_udp_connect_worker6 - set up a UDP socket 141668e220bdSChuck Lever * @work: RPC transport to connect 141768e220bdSChuck Lever * 141868e220bdSChuck Lever * Invoked by a work queue tasklet. 
141968e220bdSChuck Lever */ 142068e220bdSChuck Lever static void xs_udp_connect_worker6(struct work_struct *work) 142168e220bdSChuck Lever { 142268e220bdSChuck Lever struct sock_xprt *transport = 142368e220bdSChuck Lever container_of(work, struct sock_xprt, connect_worker.work); 142468e220bdSChuck Lever struct rpc_xprt *xprt = &transport->xprt; 142568e220bdSChuck Lever struct socket *sock = transport->sock; 142668e220bdSChuck Lever int err, status = -EIO; 142768e220bdSChuck Lever 142868e220bdSChuck Lever if (xprt->shutdown || !xprt_bound(xprt)) 142968e220bdSChuck Lever goto out; 143068e220bdSChuck Lever 143168e220bdSChuck Lever /* Start by resetting any existing state */ 143268e220bdSChuck Lever xs_close(xprt); 143368e220bdSChuck Lever 143468e220bdSChuck Lever if ((err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { 143568e220bdSChuck Lever dprintk("RPC: can't create UDP transport socket (%d).\n", -err); 143668e220bdSChuck Lever goto out; 143768e220bdSChuck Lever } 143868e220bdSChuck Lever xs_reclassify_socket(sock); 143968e220bdSChuck Lever 144068e220bdSChuck Lever if (xs_bind6(transport, sock) < 0) { 144168e220bdSChuck Lever sock_release(sock); 144268e220bdSChuck Lever goto out; 144368e220bdSChuck Lever } 144468e220bdSChuck Lever 144568e220bdSChuck Lever dprintk("RPC: worker connecting xprt %p to address: %s\n", 144668e220bdSChuck Lever xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 144768e220bdSChuck Lever 144868e220bdSChuck Lever xs_udp_finish_connecting(xprt, sock); 144968e220bdSChuck Lever status = 0; 145068e220bdSChuck Lever out: 145168e220bdSChuck Lever xprt_wake_pending_tasks(xprt, status); 145268e220bdSChuck Lever xprt_clear_connecting(xprt); 145368e220bdSChuck Lever } 145468e220bdSChuck Lever 14553167e12cSChuck Lever /* 14563167e12cSChuck Lever * We need to preserve the port number so the reply cache on the server can 14573167e12cSChuck Lever * find our cached RPC replies when we get around to reconnecting. 14583167e12cSChuck Lever */ 14593167e12cSChuck Lever static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) 14603167e12cSChuck Lever { 14613167e12cSChuck Lever int result; 1462ee0ac0c2SChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 14633167e12cSChuck Lever struct sockaddr any; 14643167e12cSChuck Lever 14653167e12cSChuck Lever dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); 14663167e12cSChuck Lever 14673167e12cSChuck Lever /* 14683167e12cSChuck Lever * Disconnect the transport socket by doing a connect operation 14693167e12cSChuck Lever * with AF_UNSPEC. This should return immediately... 
14703167e12cSChuck Lever */ 14713167e12cSChuck Lever memset(&any, 0, sizeof(any)); 14723167e12cSChuck Lever any.sa_family = AF_UNSPEC; 1473ee0ac0c2SChuck Lever result = kernel_connect(transport->sock, &any, sizeof(any), 0); 14743167e12cSChuck Lever if (result) 14753167e12cSChuck Lever dprintk("RPC: AF_UNSPEC connect return code %d\n", 14763167e12cSChuck Lever result); 14773167e12cSChuck Lever } 14783167e12cSChuck Lever 147916be2d20SChuck Lever static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1480b0d93ad5SChuck Lever { 148116be2d20SChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1482edb267a6SChuck Lever 1483ee0ac0c2SChuck Lever if (!transport->inet) { 1484b0d93ad5SChuck Lever struct sock *sk = sock->sk; 1485b0d93ad5SChuck Lever 1486b0d93ad5SChuck Lever write_lock_bh(&sk->sk_callback_lock); 1487b0d93ad5SChuck Lever 1488b0d93ad5SChuck Lever sk->sk_user_data = xprt; 1489314dfd79SChuck Lever transport->old_data_ready = sk->sk_data_ready; 1490314dfd79SChuck Lever transport->old_state_change = sk->sk_state_change; 1491314dfd79SChuck Lever transport->old_write_space = sk->sk_write_space; 1492b0d93ad5SChuck Lever sk->sk_data_ready = xs_tcp_data_ready; 1493b0d93ad5SChuck Lever sk->sk_state_change = xs_tcp_state_change; 1494b0d93ad5SChuck Lever sk->sk_write_space = xs_tcp_write_space; 1495b079fa7bSTrond Myklebust sk->sk_allocation = GFP_ATOMIC; 14963167e12cSChuck Lever 14973167e12cSChuck Lever /* socket options */ 14983167e12cSChuck Lever sk->sk_userlocks |= SOCK_BINDPORT_LOCK; 14993167e12cSChuck Lever sock_reset_flag(sk, SOCK_LINGER); 15003167e12cSChuck Lever tcp_sk(sk)->linger2 = 0; 15013167e12cSChuck Lever tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 1502b0d93ad5SChuck Lever 1503b0d93ad5SChuck Lever xprt_clear_connected(xprt); 1504b0d93ad5SChuck Lever 1505b0d93ad5SChuck Lever /* Reset to new socket */ 1506ee0ac0c2SChuck Lever transport->sock = sock; 1507ee0ac0c2SChuck Lever transport->inet = sk; 1508b0d93ad5SChuck Lever 1509b0d93ad5SChuck Lever write_unlock_bh(&sk->sk_callback_lock); 1510b0d93ad5SChuck Lever } 1511b0d93ad5SChuck Lever 1512b0d93ad5SChuck Lever /* Tell the socket layer to start connecting... */ 1513262ca07dSChuck Lever xprt->stat.connect_count++; 1514262ca07dSChuck Lever xprt->stat.connect_start = jiffies; 1515*95392c59SChuck Lever return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 151616be2d20SChuck Lever } 151716be2d20SChuck Lever 151816be2d20SChuck Lever /** 15199c3d72deSChuck Lever * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint 152016be2d20SChuck Lever * @work: RPC transport to connect 152116be2d20SChuck Lever * 152216be2d20SChuck Lever * Invoked by a work queue tasklet. 
152316be2d20SChuck Lever */ 15249c3d72deSChuck Lever static void xs_tcp_connect_worker4(struct work_struct *work) 152516be2d20SChuck Lever { 152616be2d20SChuck Lever struct sock_xprt *transport = 152716be2d20SChuck Lever container_of(work, struct sock_xprt, connect_worker.work); 152816be2d20SChuck Lever struct rpc_xprt *xprt = &transport->xprt; 152916be2d20SChuck Lever struct socket *sock = transport->sock; 153016be2d20SChuck Lever int err, status = -EIO; 153116be2d20SChuck Lever 153216be2d20SChuck Lever if (xprt->shutdown || !xprt_bound(xprt)) 153316be2d20SChuck Lever goto out; 153416be2d20SChuck Lever 153516be2d20SChuck Lever if (!sock) { 153616be2d20SChuck Lever /* start from scratch */ 153716be2d20SChuck Lever if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { 153816be2d20SChuck Lever dprintk("RPC: can't create TCP transport socket (%d).\n", -err); 153916be2d20SChuck Lever goto out; 154016be2d20SChuck Lever } 154116be2d20SChuck Lever xs_reclassify_socket(sock); 154216be2d20SChuck Lever 154316be2d20SChuck Lever if (xs_bind4(transport, sock) < 0) { 154416be2d20SChuck Lever sock_release(sock); 154516be2d20SChuck Lever goto out; 154616be2d20SChuck Lever } 154716be2d20SChuck Lever } else 154816be2d20SChuck Lever /* "close" the socket, preserving the local port */ 154916be2d20SChuck Lever xs_tcp_reuse_connection(xprt); 155016be2d20SChuck Lever 155116be2d20SChuck Lever dprintk("RPC: worker connecting xprt %p to address: %s\n", 155216be2d20SChuck Lever xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 155316be2d20SChuck Lever 155416be2d20SChuck Lever status = xs_tcp_finish_connecting(xprt, sock); 1555a246b010SChuck Lever dprintk("RPC: %p connect status %d connected %d sock state %d\n", 155646121cf7SChuck Lever xprt, -status, xprt_connected(xprt), 155746121cf7SChuck Lever sock->sk->sk_state); 1558a246b010SChuck Lever if (status < 0) { 1559a246b010SChuck Lever switch (status) { 1560a246b010SChuck Lever case -EINPROGRESS: 1561a246b010SChuck Lever case -EALREADY: 1562a246b010SChuck Lever goto out_clear; 15633167e12cSChuck Lever case -ECONNREFUSED: 15643167e12cSChuck Lever case -ECONNRESET: 15653167e12cSChuck Lever /* retry with existing socket, after a delay */ 15663167e12cSChuck Lever break; 15673167e12cSChuck Lever default: 15683167e12cSChuck Lever /* get rid of existing socket, and retry */ 15693167e12cSChuck Lever xs_close(xprt); 15703167e12cSChuck Lever break; 1571a246b010SChuck Lever } 1572a246b010SChuck Lever } 1573a246b010SChuck Lever out: 157444fbac22SChuck Lever xprt_wake_pending_tasks(xprt, status); 1575a246b010SChuck Lever out_clear: 15762226feb6SChuck Lever xprt_clear_connecting(xprt); 1577a246b010SChuck Lever } 1578a246b010SChuck Lever 15799903cd1cSChuck Lever /** 158068e220bdSChuck Lever * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint 158168e220bdSChuck Lever * @work: RPC transport to connect 158268e220bdSChuck Lever * 158368e220bdSChuck Lever * Invoked by a work queue tasklet. 
158468e220bdSChuck Lever */ 158568e220bdSChuck Lever static void xs_tcp_connect_worker6(struct work_struct *work) 158668e220bdSChuck Lever { 158768e220bdSChuck Lever struct sock_xprt *transport = 158868e220bdSChuck Lever container_of(work, struct sock_xprt, connect_worker.work); 158968e220bdSChuck Lever struct rpc_xprt *xprt = &transport->xprt; 159068e220bdSChuck Lever struct socket *sock = transport->sock; 159168e220bdSChuck Lever int err, status = -EIO; 159268e220bdSChuck Lever 159368e220bdSChuck Lever if (xprt->shutdown || !xprt_bound(xprt)) 159468e220bdSChuck Lever goto out; 159568e220bdSChuck Lever 159668e220bdSChuck Lever if (!sock) { 159768e220bdSChuck Lever /* start from scratch */ 159868e220bdSChuck Lever if ((err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { 159968e220bdSChuck Lever dprintk("RPC: can't create TCP transport socket (%d).\n", -err); 160068e220bdSChuck Lever goto out; 160168e220bdSChuck Lever } 160268e220bdSChuck Lever xs_reclassify_socket(sock); 160368e220bdSChuck Lever 160468e220bdSChuck Lever if (xs_bind6(transport, sock) < 0) { 160568e220bdSChuck Lever sock_release(sock); 160668e220bdSChuck Lever goto out; 160768e220bdSChuck Lever } 160868e220bdSChuck Lever } else 160968e220bdSChuck Lever /* "close" the socket, preserving the local port */ 161068e220bdSChuck Lever xs_tcp_reuse_connection(xprt); 161168e220bdSChuck Lever 161268e220bdSChuck Lever dprintk("RPC: worker connecting xprt %p to address: %s\n", 161368e220bdSChuck Lever xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 161468e220bdSChuck Lever 161568e220bdSChuck Lever status = xs_tcp_finish_connecting(xprt, sock); 161668e220bdSChuck Lever dprintk("RPC: %p connect status %d connected %d sock state %d\n", 161768e220bdSChuck Lever xprt, -status, xprt_connected(xprt), sock->sk->sk_state); 161868e220bdSChuck Lever if (status < 0) { 161968e220bdSChuck Lever switch (status) { 162068e220bdSChuck Lever case -EINPROGRESS: 162168e220bdSChuck Lever case -EALREADY: 162268e220bdSChuck Lever goto out_clear; 162368e220bdSChuck Lever case -ECONNREFUSED: 162468e220bdSChuck Lever case -ECONNRESET: 162568e220bdSChuck Lever /* retry with existing socket, after a delay */ 162668e220bdSChuck Lever break; 162768e220bdSChuck Lever default: 162868e220bdSChuck Lever /* get rid of existing socket, and retry */ 162968e220bdSChuck Lever xs_close(xprt); 163068e220bdSChuck Lever break; 163168e220bdSChuck Lever } 163268e220bdSChuck Lever } 163368e220bdSChuck Lever out: 163468e220bdSChuck Lever xprt_wake_pending_tasks(xprt, status); 163568e220bdSChuck Lever out_clear: 163668e220bdSChuck Lever xprt_clear_connecting(xprt); 163768e220bdSChuck Lever } 163868e220bdSChuck Lever 163968e220bdSChuck Lever /** 16409903cd1cSChuck Lever * xs_connect - connect a socket to a remote endpoint 16419903cd1cSChuck Lever * @task: address of RPC task that manages state of connect request 16429903cd1cSChuck Lever * 16439903cd1cSChuck Lever * TCP: If the remote end dropped the connection, delay reconnecting. 164403bf4b70SChuck Lever * 164503bf4b70SChuck Lever * UDP socket connects are synchronous, but we use a work queue anyway 164603bf4b70SChuck Lever * to guarantee that even unprivileged user processes can set up a 164703bf4b70SChuck Lever * socket on a privileged port. 164803bf4b70SChuck Lever * 164903bf4b70SChuck Lever * If a UDP socket connect fails, the delay behavior here prevents 165003bf4b70SChuck Lever * retry floods (hard mounts). 
16519903cd1cSChuck Lever */ 16529903cd1cSChuck Lever static void xs_connect(struct rpc_task *task) 1653a246b010SChuck Lever { 1654a246b010SChuck Lever struct rpc_xprt *xprt = task->tk_xprt; 1655ee0ac0c2SChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1656a246b010SChuck Lever 1657b0d93ad5SChuck Lever if (xprt_test_and_set_connecting(xprt)) 1658b0d93ad5SChuck Lever return; 1659b0d93ad5SChuck Lever 1660ee0ac0c2SChuck Lever if (transport->sock != NULL) { 166146121cf7SChuck Lever dprintk("RPC: xs_connect delayed xprt %p for %lu " 166246121cf7SChuck Lever "seconds\n", 166303bf4b70SChuck Lever xprt, xprt->reestablish_timeout / HZ); 1664c1384c9cSTrond Myklebust queue_delayed_work(rpciod_workqueue, 1665c1384c9cSTrond Myklebust &transport->connect_worker, 166603bf4b70SChuck Lever xprt->reestablish_timeout); 166703bf4b70SChuck Lever xprt->reestablish_timeout <<= 1; 166803bf4b70SChuck Lever if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 166903bf4b70SChuck Lever xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 16709903cd1cSChuck Lever } else { 16719903cd1cSChuck Lever dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 1672c1384c9cSTrond Myklebust queue_delayed_work(rpciod_workqueue, 1673c1384c9cSTrond Myklebust &transport->connect_worker, 0); 1674a246b010SChuck Lever } 1675a246b010SChuck Lever } 1676a246b010SChuck Lever 1677262ca07dSChuck Lever /** 1678262ca07dSChuck Lever * xs_udp_print_stats - display UDP socket-specific stats 1679262ca07dSChuck Lever * @xprt: rpc_xprt struct containing statistics 1680262ca07dSChuck Lever * @seq: output file 1681262ca07dSChuck Lever * 1682262ca07dSChuck Lever */ 1683262ca07dSChuck Lever static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 1684262ca07dSChuck Lever { 1685c8475461SChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1686c8475461SChuck Lever 1687262ca07dSChuck Lever seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", 1688c8475461SChuck Lever transport->port, 1689262ca07dSChuck Lever xprt->stat.bind_count, 1690262ca07dSChuck Lever xprt->stat.sends, 1691262ca07dSChuck Lever xprt->stat.recvs, 1692262ca07dSChuck Lever xprt->stat.bad_xids, 1693262ca07dSChuck Lever xprt->stat.req_u, 1694262ca07dSChuck Lever xprt->stat.bklog_u); 1695262ca07dSChuck Lever } 1696262ca07dSChuck Lever 1697262ca07dSChuck Lever /** 1698262ca07dSChuck Lever * xs_tcp_print_stats - display TCP socket-specific stats 1699262ca07dSChuck Lever * @xprt: rpc_xprt struct containing statistics 1700262ca07dSChuck Lever * @seq: output file 1701262ca07dSChuck Lever * 1702262ca07dSChuck Lever */ 1703262ca07dSChuck Lever static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 1704262ca07dSChuck Lever { 1705c8475461SChuck Lever struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1706262ca07dSChuck Lever long idle_time = 0; 1707262ca07dSChuck Lever 1708262ca07dSChuck Lever if (xprt_connected(xprt)) 1709262ca07dSChuck Lever idle_time = (long)(jiffies - xprt->last_used) / HZ; 1710262ca07dSChuck Lever 1711262ca07dSChuck Lever seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", 1712c8475461SChuck Lever transport->port, 1713262ca07dSChuck Lever xprt->stat.bind_count, 1714262ca07dSChuck Lever xprt->stat.connect_count, 1715262ca07dSChuck Lever xprt->stat.connect_time, 1716262ca07dSChuck Lever idle_time, 1717262ca07dSChuck Lever xprt->stat.sends, 1718262ca07dSChuck Lever xprt->stat.recvs, 1719262ca07dSChuck Lever xprt->stat.bad_xids,
1720262ca07dSChuck Lever xprt->stat.req_u, 1721262ca07dSChuck Lever xprt->stat.bklog_u); 1722262ca07dSChuck Lever } 1723262ca07dSChuck Lever 1724262965f5SChuck Lever static struct rpc_xprt_ops xs_udp_ops = { 172543118c29SChuck Lever .set_buffer_size = xs_udp_set_buffer_size, 172612a80469SChuck Lever .reserve_xprt = xprt_reserve_xprt_cong, 172749e9a890SChuck Lever .release_xprt = xprt_release_xprt_cong, 172845160d62SChuck Lever .rpcbind = rpcb_getport_async, 172992200412SChuck Lever .set_port = xs_set_port, 17309903cd1cSChuck Lever .connect = xs_connect, 173102107148SChuck Lever .buf_alloc = rpc_malloc, 173202107148SChuck Lever .buf_free = rpc_free, 1733262965f5SChuck Lever .send_request = xs_udp_send_request, 1734fe3aca29SChuck Lever .set_retrans_timeout = xprt_set_retrans_timeout_rtt, 173546c0ee8bSChuck Lever .timer = xs_udp_timer, 1736a58dd398SChuck Lever .release_request = xprt_release_rqst_cong, 1737262965f5SChuck Lever .close = xs_close, 1738262965f5SChuck Lever .destroy = xs_destroy, 1739262ca07dSChuck Lever .print_stats = xs_udp_print_stats, 1740262965f5SChuck Lever }; 1741262965f5SChuck Lever 1742262965f5SChuck Lever static struct rpc_xprt_ops xs_tcp_ops = { 174312a80469SChuck Lever .reserve_xprt = xprt_reserve_xprt, 1744e0ab53deSTrond Myklebust .release_xprt = xs_tcp_release_xprt, 174545160d62SChuck Lever .rpcbind = rpcb_getport_async, 174692200412SChuck Lever .set_port = xs_set_port, 1747262965f5SChuck Lever .connect = xs_connect, 174802107148SChuck Lever .buf_alloc = rpc_malloc, 174902107148SChuck Lever .buf_free = rpc_free, 1750262965f5SChuck Lever .send_request = xs_tcp_send_request, 1751fe3aca29SChuck Lever .set_retrans_timeout = xprt_set_retrans_timeout_def, 17529903cd1cSChuck Lever .close = xs_close, 17539903cd1cSChuck Lever .destroy = xs_destroy, 1754262ca07dSChuck Lever .print_stats = xs_tcp_print_stats, 1755a246b010SChuck Lever }; 1756a246b010SChuck Lever 175796802a09SFrank van Maarseveen static struct rpc_xprt *xs_setup_xprt(struct rpc_xprtsock_create *args, unsigned int slot_table_size) 1758c8541ecdSChuck Lever { 1759c8541ecdSChuck Lever struct rpc_xprt *xprt; 1760ffc2e518SChuck Lever struct sock_xprt *new; 1761c8541ecdSChuck Lever 176296802a09SFrank van Maarseveen if (args->addrlen > sizeof(xprt->addr)) { 1763c8541ecdSChuck Lever dprintk("RPC: xs_setup_xprt: address too large\n"); 1764c8541ecdSChuck Lever return ERR_PTR(-EBADF); 1765c8541ecdSChuck Lever } 1766c8541ecdSChuck Lever 1767ffc2e518SChuck Lever new = kzalloc(sizeof(*new), GFP_KERNEL); 1768ffc2e518SChuck Lever if (new == NULL) { 176946121cf7SChuck Lever dprintk("RPC: xs_setup_xprt: couldn't allocate " 177046121cf7SChuck Lever "rpc_xprt\n"); 1771c8541ecdSChuck Lever return ERR_PTR(-ENOMEM); 1772c8541ecdSChuck Lever } 1773ffc2e518SChuck Lever xprt = &new->xprt; 1774c8541ecdSChuck Lever 1775c8541ecdSChuck Lever xprt->max_reqs = slot_table_size; 1776c8541ecdSChuck Lever xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); 1777c8541ecdSChuck Lever if (xprt->slot == NULL) { 1778c8541ecdSChuck Lever kfree(xprt); 177946121cf7SChuck Lever dprintk("RPC: xs_setup_xprt: couldn't allocate slot " 178046121cf7SChuck Lever "table\n"); 1781c8541ecdSChuck Lever return ERR_PTR(-ENOMEM); 1782c8541ecdSChuck Lever } 1783c8541ecdSChuck Lever 178496802a09SFrank van Maarseveen memcpy(&xprt->addr, args->dstaddr, args->addrlen); 178596802a09SFrank van Maarseveen xprt->addrlen = args->addrlen; 1786d3bc9a1dSFrank van Maarseveen if (args->srcaddr) 1787d3bc9a1dSFrank van Maarseveen memcpy(&new->addr, args->srcaddr, 
args->addrlen); 1788c8475461SChuck Lever new->port = xs_get_random_port(); 1789c8541ecdSChuck Lever 1790c8541ecdSChuck Lever return xprt; 1791c8541ecdSChuck Lever } 1792c8541ecdSChuck Lever 17939903cd1cSChuck Lever /** 17949903cd1cSChuck Lever * xs_setup_udp - Set up transport to use a UDP socket 179596802a09SFrank van Maarseveen * @args: rpc transport creation arguments 17969903cd1cSChuck Lever * 17979903cd1cSChuck Lever */ 179896802a09SFrank van Maarseveen struct rpc_xprt *xs_setup_udp(struct rpc_xprtsock_create *args) 1799a246b010SChuck Lever { 18008f9d5b1aSChuck Lever struct sockaddr *addr = args->dstaddr; 1801c8541ecdSChuck Lever struct rpc_xprt *xprt; 1802c8475461SChuck Lever struct sock_xprt *transport; 1803a246b010SChuck Lever 180496802a09SFrank van Maarseveen xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); 1805c8541ecdSChuck Lever if (IS_ERR(xprt)) 1806c8541ecdSChuck Lever return xprt; 1807c8475461SChuck Lever transport = container_of(xprt, struct sock_xprt, xprt); 1808a246b010SChuck Lever 1809ec739ef0SChuck Lever xprt->prot = IPPROTO_UDP; 1810808012fbSChuck Lever xprt->tsh_size = 0; 1811a246b010SChuck Lever /* XXX: header size can vary due to auth type, IPv6, etc. */ 1812a246b010SChuck Lever xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 1813a246b010SChuck Lever 181403bf4b70SChuck Lever xprt->bind_timeout = XS_BIND_TO; 181503bf4b70SChuck Lever xprt->connect_timeout = XS_UDP_CONN_TO; 181603bf4b70SChuck Lever xprt->reestablish_timeout = XS_UDP_REEST_TO; 181703bf4b70SChuck Lever xprt->idle_timeout = XS_IDLE_DISC_TO; 1818a246b010SChuck Lever 1819262965f5SChuck Lever xprt->ops = &xs_udp_ops; 1820a246b010SChuck Lever 182196802a09SFrank van Maarseveen if (args->timeout) 182296802a09SFrank van Maarseveen xprt->timeout = *args->timeout; 1823a246b010SChuck Lever else 18249903cd1cSChuck Lever xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); 1825a246b010SChuck Lever 18268f9d5b1aSChuck Lever switch (addr->sa_family) { 18278f9d5b1aSChuck Lever case AF_INET: 18288f9d5b1aSChuck Lever if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 18298f9d5b1aSChuck Lever xprt_set_bound(xprt); 18308f9d5b1aSChuck Lever 18318f9d5b1aSChuck Lever INIT_DELAYED_WORK(&transport->connect_worker, 18328f9d5b1aSChuck Lever xs_udp_connect_worker4); 1833ba10f2c2SChuck Lever xs_format_ipv4_peer_addresses(xprt); 18348f9d5b1aSChuck Lever break; 18358f9d5b1aSChuck Lever case AF_INET6: 18368f9d5b1aSChuck Lever if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 18378f9d5b1aSChuck Lever xprt_set_bound(xprt); 18388f9d5b1aSChuck Lever 18398f9d5b1aSChuck Lever INIT_DELAYED_WORK(&transport->connect_worker, 18408f9d5b1aSChuck Lever xs_udp_connect_worker6); 18418f9d5b1aSChuck Lever xs_format_ipv6_peer_addresses(xprt); 18428f9d5b1aSChuck Lever break; 18438f9d5b1aSChuck Lever default: 18448f9d5b1aSChuck Lever kfree(xprt); 18458f9d5b1aSChuck Lever return ERR_PTR(-EAFNOSUPPORT); 18468f9d5b1aSChuck Lever } 18478f9d5b1aSChuck Lever 1848edb267a6SChuck Lever dprintk("RPC: set up transport to address %s\n", 18497559c7a2SChuck Lever xprt->address_strings[RPC_DISPLAY_ALL]); 1850edb267a6SChuck Lever 1851c8541ecdSChuck Lever return xprt; 1852a246b010SChuck Lever } 1853a246b010SChuck Lever 18549903cd1cSChuck Lever /** 18559903cd1cSChuck Lever * xs_setup_tcp - Set up transport to use a TCP socket 185696802a09SFrank van Maarseveen * @args: rpc transport creation arguments 18579903cd1cSChuck Lever * 18589903cd1cSChuck Lever */ 185996802a09SFrank van Maarseveen struct rpc_xprt *xs_setup_tcp(struct rpc_xprtsock_create *args) 
1860a246b010SChuck Lever { 18618f9d5b1aSChuck Lever struct sockaddr *addr = args->dstaddr; 1862c8541ecdSChuck Lever struct rpc_xprt *xprt; 1863c8475461SChuck Lever struct sock_xprt *transport; 1864a246b010SChuck Lever 186596802a09SFrank van Maarseveen xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 1866c8541ecdSChuck Lever if (IS_ERR(xprt)) 1867c8541ecdSChuck Lever return xprt; 1868c8475461SChuck Lever transport = container_of(xprt, struct sock_xprt, xprt); 1869a246b010SChuck Lever 1870ec739ef0SChuck Lever xprt->prot = IPPROTO_TCP; 1871808012fbSChuck Lever xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1872808012fbSChuck Lever xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1873a246b010SChuck Lever 187403bf4b70SChuck Lever xprt->bind_timeout = XS_BIND_TO; 187503bf4b70SChuck Lever xprt->connect_timeout = XS_TCP_CONN_TO; 187603bf4b70SChuck Lever xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 187703bf4b70SChuck Lever xprt->idle_timeout = XS_IDLE_DISC_TO; 1878a246b010SChuck Lever 1879262965f5SChuck Lever xprt->ops = &xs_tcp_ops; 1880a246b010SChuck Lever 188196802a09SFrank van Maarseveen if (args->timeout) 188296802a09SFrank van Maarseveen xprt->timeout = *args->timeout; 1883a246b010SChuck Lever else 18849903cd1cSChuck Lever xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); 1885a246b010SChuck Lever 18868f9d5b1aSChuck Lever switch (addr->sa_family) { 18878f9d5b1aSChuck Lever case AF_INET: 18888f9d5b1aSChuck Lever if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 18898f9d5b1aSChuck Lever xprt_set_bound(xprt); 18908f9d5b1aSChuck Lever 18918f9d5b1aSChuck Lever INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4); 1892ba10f2c2SChuck Lever xs_format_ipv4_peer_addresses(xprt); 18938f9d5b1aSChuck Lever break; 18948f9d5b1aSChuck Lever case AF_INET6: 18958f9d5b1aSChuck Lever if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 18968f9d5b1aSChuck Lever xprt_set_bound(xprt); 18978f9d5b1aSChuck Lever 18988f9d5b1aSChuck Lever INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6); 18998f9d5b1aSChuck Lever xs_format_ipv6_peer_addresses(xprt); 19008f9d5b1aSChuck Lever break; 19018f9d5b1aSChuck Lever default: 19028f9d5b1aSChuck Lever kfree(xprt); 19038f9d5b1aSChuck Lever return ERR_PTR(-EAFNOSUPPORT); 19048f9d5b1aSChuck Lever } 19058f9d5b1aSChuck Lever 1906edb267a6SChuck Lever dprintk("RPC: set up transport to address %s\n", 19077559c7a2SChuck Lever xprt->address_strings[RPC_DISPLAY_ALL]); 1908edb267a6SChuck Lever 1909c8541ecdSChuck Lever return xprt; 1910a246b010SChuck Lever } 1911282b32e1SChuck Lever 1912282b32e1SChuck Lever /** 1913fbf76683SChuck Lever * init_socket_xprt - set up xprtsock's sysctls 1914282b32e1SChuck Lever * 1915282b32e1SChuck Lever */ 1916282b32e1SChuck Lever int init_socket_xprt(void) 1917282b32e1SChuck Lever { 1918fbf76683SChuck Lever #ifdef RPC_DEBUG 19192b1bec5fSEric W. Biederman if (!sunrpc_table_header) 19200b4d4147SEric W. 
Biederman sunrpc_table_header = register_sysctl_table(sunrpc_table); 1921fbf76683SChuck Lever #endif 1922fbf76683SChuck Lever 1923282b32e1SChuck Lever return 0; 1924282b32e1SChuck Lever } 1925282b32e1SChuck Lever 1926282b32e1SChuck Lever /** 1927fbf76683SChuck Lever * cleanup_socket_xprt - remove xprtsock's sysctls 1928282b32e1SChuck Lever * 1929282b32e1SChuck Lever */ 1930282b32e1SChuck Lever void cleanup_socket_xprt(void) 1931282b32e1SChuck Lever { 1932fbf76683SChuck Lever #ifdef RPC_DEBUG 1933fbf76683SChuck Lever if (sunrpc_table_header) { 1934fbf76683SChuck Lever unregister_sysctl_table(sunrpc_table_header); 1935fbf76683SChuck Lever sunrpc_table_header = NULL; 1936fbf76683SChuck Lever } 1937fbf76683SChuck Lever #endif 1938282b32e1SChuck Lever } 1939
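#if 0
/*
 * Illustrative sketch only, not part of the original file: one way a
 * caller could fill in the rpc_xprtsock_create argument block that
 * xs_setup_udp() and xs_setup_tcp() consume.  Only the fields this file
 * actually reads (dstaddr, addrlen, srcaddr, timeout) are shown, and
 * the function name and server address below are made up for the
 * example.
 */
static struct rpc_xprt *example_setup_tcp_transport(struct sockaddr_in *server)
{
	struct rpc_xprtsock_create args = {
		.dstaddr = (struct sockaddr *) server,
		.addrlen = sizeof(*server),
		.srcaddr = NULL,	/* no preferred source address */
		.timeout = NULL,	/* use the defaults set by xs_setup_tcp() */
	};
	struct rpc_xprt *xprt;

	xprt = xs_setup_tcp(&args);
	if (IS_ERR(xprt))
		return xprt;	/* e.g. ERR_PTR(-EBADF) or ERR_PTR(-ENOMEM) */

	/* The transport connects lazily via xs_connect() when first used. */
	return xprt;
}
#endif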