xref: /openbmc/linux/net/sunrpc/xprtsock.c (revision 4cfc7e6019caa3e97d2a81c48c8d575d7b38d751)
1a246b010SChuck Lever /*
2a246b010SChuck Lever  * linux/net/sunrpc/xprtsock.c
3a246b010SChuck Lever  *
4a246b010SChuck Lever  * Client-side transport implementation for sockets.
5a246b010SChuck Lever  *
6113aa838SAlan Cox  * TCP callback races fixes (C) 1998 Red Hat
7113aa838SAlan Cox  * TCP send fixes (C) 1998 Red Hat
8a246b010SChuck Lever  * TCP NFS related read + write fixes
9a246b010SChuck Lever  *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10a246b010SChuck Lever  *
11a246b010SChuck Lever  * Rewrite of large parts of the code in order to stabilize TCP stuff.
12a246b010SChuck Lever  * Fix behaviour when socket buffer is full.
13a246b010SChuck Lever  *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
1455aa4f58SChuck Lever  *
1555aa4f58SChuck Lever  * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
168f9d5b1aSChuck Lever  *
178f9d5b1aSChuck Lever  * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
188f9d5b1aSChuck Lever  *   <gilles.quillard@bull.net>
19a246b010SChuck Lever  */
20a246b010SChuck Lever 
21a246b010SChuck Lever #include <linux/types.h>
22a246b010SChuck Lever #include <linux/slab.h>
23bc25571eS"Talpey, Thomas" #include <linux/module.h>
24a246b010SChuck Lever #include <linux/capability.h>
25a246b010SChuck Lever #include <linux/pagemap.h>
26a246b010SChuck Lever #include <linux/errno.h>
27a246b010SChuck Lever #include <linux/socket.h>
28a246b010SChuck Lever #include <linux/in.h>
29a246b010SChuck Lever #include <linux/net.h>
30a246b010SChuck Lever #include <linux/mm.h>
31a246b010SChuck Lever #include <linux/udp.h>
32a246b010SChuck Lever #include <linux/tcp.h>
33a246b010SChuck Lever #include <linux/sunrpc/clnt.h>
3402107148SChuck Lever #include <linux/sunrpc/sched.h>
35*4cfc7e60SRahul Iyer #include <linux/sunrpc/svcsock.h>
3649c36fccS"Talpey, Thomas" #include <linux/sunrpc/xprtsock.h>
37a246b010SChuck Lever #include <linux/file.h>
3844b98efdSRicardo Labiaga #ifdef CONFIG_NFS_V4_1
3944b98efdSRicardo Labiaga #include <linux/sunrpc/bc_xprt.h>
4044b98efdSRicardo Labiaga #endif
41a246b010SChuck Lever 
42a246b010SChuck Lever #include <net/sock.h>
43a246b010SChuck Lever #include <net/checksum.h>
44a246b010SChuck Lever #include <net/udp.h>
45a246b010SChuck Lever #include <net/tcp.h>
46a246b010SChuck Lever 
47*4cfc7e60SRahul Iyer #include "sunrpc.h"
489903cd1cSChuck Lever /*
49c556b754SChuck Lever  * xprtsock tunables
50c556b754SChuck Lever  */
51c556b754SChuck Lever unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
52c556b754SChuck Lever unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
53c556b754SChuck Lever 
54c556b754SChuck Lever unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
55c556b754SChuck Lever unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
56c556b754SChuck Lever 
577d1e8255STrond Myklebust #define XS_TCP_LINGER_TO	(15U * HZ)
5825fe6142STrond Myklebust static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
597d1e8255STrond Myklebust 
60c556b754SChuck Lever /*
61fbf76683SChuck Lever  * We can register our own files under /proc/sys/sunrpc by
62fbf76683SChuck Lever  * calling register_sysctl_table() again.  The files in that
63fbf76683SChuck Lever  * directory become the union of all files registered there.
64fbf76683SChuck Lever  *
65fbf76683SChuck Lever  * We simply need to make sure that we don't collide with
66fbf76683SChuck Lever  * someone else's file names!
67fbf76683SChuck Lever  */
68fbf76683SChuck Lever 
69fbf76683SChuck Lever #ifdef RPC_DEBUG
70fbf76683SChuck Lever 
71fbf76683SChuck Lever static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
72fbf76683SChuck Lever static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
73fbf76683SChuck Lever static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
74fbf76683SChuck Lever static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
75fbf76683SChuck Lever 
76fbf76683SChuck Lever static struct ctl_table_header *sunrpc_table_header;
77fbf76683SChuck Lever 
78fbf76683SChuck Lever /*
79fbf76683SChuck Lever  * FIXME: changing the UDP slot table size should also resize the UDP
80fbf76683SChuck Lever  *        socket buffers for existing UDP transports
81fbf76683SChuck Lever  */
82fbf76683SChuck Lever static ctl_table xs_tunables_table[] = {
83fbf76683SChuck Lever 	{
84fbf76683SChuck Lever 		.ctl_name	= CTL_SLOTTABLE_UDP,
85fbf76683SChuck Lever 		.procname	= "udp_slot_table_entries",
86fbf76683SChuck Lever 		.data		= &xprt_udp_slot_table_entries,
87fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
88fbf76683SChuck Lever 		.mode		= 0644,
89fbf76683SChuck Lever 		.proc_handler	= &proc_dointvec_minmax,
90fbf76683SChuck Lever 		.strategy	= &sysctl_intvec,
91fbf76683SChuck Lever 		.extra1		= &min_slot_table_size,
92fbf76683SChuck Lever 		.extra2		= &max_slot_table_size
93fbf76683SChuck Lever 	},
94fbf76683SChuck Lever 	{
95fbf76683SChuck Lever 		.ctl_name	= CTL_SLOTTABLE_TCP,
96fbf76683SChuck Lever 		.procname	= "tcp_slot_table_entries",
97fbf76683SChuck Lever 		.data		= &xprt_tcp_slot_table_entries,
98fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
99fbf76683SChuck Lever 		.mode		= 0644,
100fbf76683SChuck Lever 		.proc_handler	= &proc_dointvec_minmax,
101fbf76683SChuck Lever 		.strategy	= &sysctl_intvec,
102fbf76683SChuck Lever 		.extra1		= &min_slot_table_size,
103fbf76683SChuck Lever 		.extra2		= &max_slot_table_size
104fbf76683SChuck Lever 	},
105fbf76683SChuck Lever 	{
106fbf76683SChuck Lever 		.ctl_name	= CTL_MIN_RESVPORT,
107fbf76683SChuck Lever 		.procname	= "min_resvport",
108fbf76683SChuck Lever 		.data		= &xprt_min_resvport,
109fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
110fbf76683SChuck Lever 		.mode		= 0644,
111fbf76683SChuck Lever 		.proc_handler	= &proc_dointvec_minmax,
112fbf76683SChuck Lever 		.strategy	= &sysctl_intvec,
113fbf76683SChuck Lever 		.extra1		= &xprt_min_resvport_limit,
114fbf76683SChuck Lever 		.extra2		= &xprt_max_resvport_limit
115fbf76683SChuck Lever 	},
116fbf76683SChuck Lever 	{
117fbf76683SChuck Lever 		.ctl_name	= CTL_MAX_RESVPORT,
118fbf76683SChuck Lever 		.procname	= "max_resvport",
119fbf76683SChuck Lever 		.data		= &xprt_max_resvport,
120fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
121fbf76683SChuck Lever 		.mode		= 0644,
122fbf76683SChuck Lever 		.proc_handler	= &proc_dointvec_minmax,
123fbf76683SChuck Lever 		.strategy	= &sysctl_intvec,
124fbf76683SChuck Lever 		.extra1		= &xprt_min_resvport_limit,
125fbf76683SChuck Lever 		.extra2		= &xprt_max_resvport_limit
126fbf76683SChuck Lever 	},
127fbf76683SChuck Lever 	{
12825fe6142STrond Myklebust 		.procname	= "tcp_fin_timeout",
12925fe6142STrond Myklebust 		.data		= &xs_tcp_fin_timeout,
13025fe6142STrond Myklebust 		.maxlen		= sizeof(xs_tcp_fin_timeout),
13125fe6142STrond Myklebust 		.mode		= 0644,
13225fe6142STrond Myklebust 		.proc_handler	= &proc_dointvec_jiffies,
13325fe6142STrond Myklebust 		.strategy	= sysctl_jiffies
13425fe6142STrond Myklebust 	},
13525fe6142STrond Myklebust 	{
136fbf76683SChuck Lever 		.ctl_name = 0,
137fbf76683SChuck Lever 	},
138fbf76683SChuck Lever };
139fbf76683SChuck Lever 
140fbf76683SChuck Lever static ctl_table sunrpc_table[] = {
141fbf76683SChuck Lever 	{
142fbf76683SChuck Lever 		.ctl_name	= CTL_SUNRPC,
143fbf76683SChuck Lever 		.procname	= "sunrpc",
144fbf76683SChuck Lever 		.mode		= 0555,
145fbf76683SChuck Lever 		.child		= xs_tunables_table
146fbf76683SChuck Lever 	},
147fbf76683SChuck Lever 	{
148fbf76683SChuck Lever 		.ctl_name = 0,
149fbf76683SChuck Lever 	},
150fbf76683SChuck Lever };
151fbf76683SChuck Lever 
152fbf76683SChuck Lever #endif
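
/*
 * Editorial sketch (not part of the original file): the tables above are
 * meant to be hooked up once via register_sysctl_table(), as the comment
 * near the top of this section describes, and torn down again with
 * unregister_sysctl_table().  Roughly (the init/exit function names and
 * placement are assumptions for illustration only):
 *
 *	static int __init example_init(void)
 *	{
 *		sunrpc_table_header = register_sysctl_table(sunrpc_table);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		if (sunrpc_table_header) {
 *			unregister_sysctl_table(sunrpc_table_header);
 *			sunrpc_table_header = NULL;
 *		}
 *	}
 */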
153fbf76683SChuck Lever 
154fbf76683SChuck Lever /*
15503bf4b70SChuck Lever  * Time out for an RPC UDP socket connect.  UDP socket connects are
15603bf4b70SChuck Lever  * synchronous, but we set a timeout anyway in case of resource
15703bf4b70SChuck Lever  * exhaustion on the local host.
15803bf4b70SChuck Lever  */
15903bf4b70SChuck Lever #define XS_UDP_CONN_TO		(5U * HZ)
16003bf4b70SChuck Lever 
16103bf4b70SChuck Lever /*
16203bf4b70SChuck Lever  * Wait duration for an RPC TCP connection to be established.  Solaris
16303bf4b70SChuck Lever  * NFS over TCP uses 60 seconds, for example, which is in line with how
16403bf4b70SChuck Lever  * long a server takes to reboot.
16503bf4b70SChuck Lever  */
16603bf4b70SChuck Lever #define XS_TCP_CONN_TO		(60U * HZ)
16703bf4b70SChuck Lever 
16803bf4b70SChuck Lever /*
16903bf4b70SChuck Lever  * Wait duration for a reply from the RPC portmapper.
17003bf4b70SChuck Lever  */
17103bf4b70SChuck Lever #define XS_BIND_TO		(60U * HZ)
17203bf4b70SChuck Lever 
17303bf4b70SChuck Lever /*
17403bf4b70SChuck Lever  * Delay if a UDP socket connect error occurs.  This is most likely some
17503bf4b70SChuck Lever  * kind of resource problem on the local host.
17603bf4b70SChuck Lever  */
17703bf4b70SChuck Lever #define XS_UDP_REEST_TO		(2U * HZ)
17803bf4b70SChuck Lever 
17903bf4b70SChuck Lever /*
18003bf4b70SChuck Lever  * The reestablish timeout allows clients to delay for a bit before attempting
18103bf4b70SChuck Lever  * to reconnect to a server that just dropped our connection.
18203bf4b70SChuck Lever  *
18303bf4b70SChuck Lever  * We implement an exponential backoff when trying to reestablish a TCP
18403bf4b70SChuck Lever  * transport connection with the server.  Some servers like to drop a TCP
18503bf4b70SChuck Lever  * connection when they are overworked, so we start with a short timeout and
18603bf4b70SChuck Lever  * increase over time if the server is down or not responding.
18703bf4b70SChuck Lever  */
18803bf4b70SChuck Lever #define XS_TCP_INIT_REEST_TO	(3U * HZ)
18903bf4b70SChuck Lever #define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
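
/*
 * Illustrative note (not from the original file): with the values above,
 * reconnect attempts back off exponentially, roughly doubling from 3
 * seconds up to the 5 minute cap, e.g.:
 *
 *	xprt->reestablish_timeout <<= 1;
 *	if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
 *		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 */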
19003bf4b70SChuck Lever 
19103bf4b70SChuck Lever /*
19203bf4b70SChuck Lever  * TCP idle timeout; client drops the transport socket if it is idle
19303bf4b70SChuck Lever  * for this long.  Note that we also timeout UDP sockets to prevent
19403bf4b70SChuck Lever  * holding port numbers when there is no RPC traffic.
19503bf4b70SChuck Lever  */
19603bf4b70SChuck Lever #define XS_IDLE_DISC_TO		(5U * 60 * HZ)
19703bf4b70SChuck Lever 
198a246b010SChuck Lever #ifdef RPC_DEBUG
199a246b010SChuck Lever # undef  RPC_DEBUG_DATA
2009903cd1cSChuck Lever # define RPCDBG_FACILITY	RPCDBG_TRANS
201a246b010SChuck Lever #endif
202a246b010SChuck Lever 
203a246b010SChuck Lever #ifdef RPC_DEBUG_DATA
2049903cd1cSChuck Lever static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
205a246b010SChuck Lever {
206a246b010SChuck Lever 	u8 *buf = (u8 *) packet;
207a246b010SChuck Lever 	int j;
208a246b010SChuck Lever 
209a246b010SChuck Lever 	dprintk("RPC:       %s\n", msg);
210a246b010SChuck Lever 	for (j = 0; j < count && j < 128; j += 4) {
211a246b010SChuck Lever 		if (!(j & 31)) {
212a246b010SChuck Lever 			if (j)
213a246b010SChuck Lever 				dprintk("\n");
214a246b010SChuck Lever 			dprintk("0x%04x ", j);
215a246b010SChuck Lever 		}
216a246b010SChuck Lever 		dprintk("%02x%02x%02x%02x ",
217a246b010SChuck Lever 			buf[j], buf[j+1], buf[j+2], buf[j+3]);
218a246b010SChuck Lever 	}
219a246b010SChuck Lever 	dprintk("\n");
220a246b010SChuck Lever }
221a246b010SChuck Lever #else
2229903cd1cSChuck Lever static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
223a246b010SChuck Lever {
224a246b010SChuck Lever 	/* NOP */
225a246b010SChuck Lever }
226a246b010SChuck Lever #endif
227a246b010SChuck Lever 
228ffc2e518SChuck Lever struct sock_xprt {
229ffc2e518SChuck Lever 	struct rpc_xprt		xprt;
230ee0ac0c2SChuck Lever 
231ee0ac0c2SChuck Lever 	/*
232ee0ac0c2SChuck Lever 	 * Network layer
233ee0ac0c2SChuck Lever 	 */
234ee0ac0c2SChuck Lever 	struct socket *		sock;
235ee0ac0c2SChuck Lever 	struct sock *		inet;
23651971139SChuck Lever 
23751971139SChuck Lever 	/*
23851971139SChuck Lever 	 * State of TCP reply receive
23951971139SChuck Lever 	 */
24051971139SChuck Lever 	__be32			tcp_fraghdr,
24151971139SChuck Lever 				tcp_xid;
24251971139SChuck Lever 
24351971139SChuck Lever 	u32			tcp_offset,
24451971139SChuck Lever 				tcp_reclen;
24551971139SChuck Lever 
24651971139SChuck Lever 	unsigned long		tcp_copied,
24751971139SChuck Lever 				tcp_flags;
248c8475461SChuck Lever 
249c8475461SChuck Lever 	/*
250c8475461SChuck Lever 	 * Connection of transports
251c8475461SChuck Lever 	 */
25234161db6STrond Myklebust 	struct delayed_work	connect_worker;
253fbfffbd5SChuck Lever 	struct sockaddr_storage	srcaddr;
254fbfffbd5SChuck Lever 	unsigned short		srcport;
2557c6e066eSChuck Lever 
2567c6e066eSChuck Lever 	/*
2577c6e066eSChuck Lever 	 * UDP socket buffer size parameters
2587c6e066eSChuck Lever 	 */
2597c6e066eSChuck Lever 	size_t			rcvsize,
2607c6e066eSChuck Lever 				sndsize;
261314dfd79SChuck Lever 
262314dfd79SChuck Lever 	/*
263314dfd79SChuck Lever 	 * Saved socket callback addresses
264314dfd79SChuck Lever 	 */
265314dfd79SChuck Lever 	void			(*old_data_ready)(struct sock *, int);
266314dfd79SChuck Lever 	void			(*old_state_change)(struct sock *);
267314dfd79SChuck Lever 	void			(*old_write_space)(struct sock *);
2682a9e1cfaSTrond Myklebust 	void			(*old_error_report)(struct sock *);
269ffc2e518SChuck Lever };
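
/*
 * Illustrative note (not from the original file): struct rpc_xprt is
 * embedded as the first member of struct sock_xprt, so the rest of this
 * file recovers the containing transport from a generic xprt pointer
 * with container_of(), e.g.:
 *
 *	struct sock_xprt *transport =
 *			container_of(xprt, struct sock_xprt, xprt);
 */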
270ffc2e518SChuck Lever 
271e136d092SChuck Lever /*
272e136d092SChuck Lever  * TCP receive state flags
273e136d092SChuck Lever  */
274e136d092SChuck Lever #define TCP_RCV_LAST_FRAG	(1UL << 0)
275e136d092SChuck Lever #define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
276e136d092SChuck Lever #define TCP_RCV_COPY_XID	(1UL << 2)
277e136d092SChuck Lever #define TCP_RCV_COPY_DATA	(1UL << 3)
278f4a2e418SRicardo Labiaga #define TCP_RCV_READ_CALLDIR	(1UL << 4)
279f4a2e418SRicardo Labiaga #define TCP_RCV_COPY_CALLDIR	(1UL << 5)
28018dca02aSRicardo Labiaga 
28118dca02aSRicardo Labiaga /*
28218dca02aSRicardo Labiaga  * TCP RPC flags
28318dca02aSRicardo Labiaga  */
284f4a2e418SRicardo Labiaga #define TCP_RPC_REPLY		(1UL << 6)
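
/*
 * Illustrative note (not from the original file): these flags drive the
 * state machine that reassembles RPC records from the TCP byte stream.
 * Each record on the wire starts with a 4-byte fragment header (a
 * last-fragment bit plus a 31-bit fragment length), followed by the
 * 4-byte XID, the 4-byte call/reply direction word (the RPC msg_type),
 * and then the rest of the RPC message:
 *
 *	| 4-byte frag header | 4-byte XID | 4-byte msg_type | body ... |
 *
 * The fragment length counts everything after the fragment header, which
 * is why tcp_offset is expected to reach 8 once the XID and direction
 * have both been read.
 */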
285e136d092SChuck Lever 
28695392c59SChuck Lever static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
287edb267a6SChuck Lever {
28895392c59SChuck Lever 	return (struct sockaddr *) &xprt->addr;
28995392c59SChuck Lever }
29095392c59SChuck Lever 
29195392c59SChuck Lever static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
29295392c59SChuck Lever {
29395392c59SChuck Lever 	return (struct sockaddr_in *) &xprt->addr;
29495392c59SChuck Lever }
29595392c59SChuck Lever 
29695392c59SChuck Lever static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
29795392c59SChuck Lever {
29895392c59SChuck Lever 	return (struct sockaddr_in6 *) &xprt->addr;
29995392c59SChuck Lever }
30095392c59SChuck Lever 
301c877b849SChuck Lever static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
302c877b849SChuck Lever {
303c877b849SChuck Lever 	struct sockaddr *sap = xs_addr(xprt);
3049dc3b095SChuck Lever 	struct sockaddr_in6 *sin6;
3059dc3b095SChuck Lever 	struct sockaddr_in *sin;
306c877b849SChuck Lever 	char buf[128];
307c877b849SChuck Lever 
308c877b849SChuck Lever 	(void)rpc_ntop(sap, buf, sizeof(buf));
309c877b849SChuck Lever 	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
310c877b849SChuck Lever 
3119dc3b095SChuck Lever 	switch (sap->sa_family) {
3129dc3b095SChuck Lever 	case AF_INET:
3139dc3b095SChuck Lever 		sin = xs_addr_in(xprt);
3149dc3b095SChuck Lever 		(void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
3159dc3b095SChuck Lever 					NIPQUAD(sin->sin_addr.s_addr));
3169dc3b095SChuck Lever 		break;
3179dc3b095SChuck Lever 	case AF_INET6:
3189dc3b095SChuck Lever 		sin6 = xs_addr_in6(xprt);
3199dc3b095SChuck Lever 		(void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
3209dc3b095SChuck Lever 		break;
3219dc3b095SChuck Lever 	default:
3229dc3b095SChuck Lever 		BUG();
3239dc3b095SChuck Lever 	}
3249dc3b095SChuck Lever 	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
3259dc3b095SChuck Lever }
3269dc3b095SChuck Lever 
3279dc3b095SChuck Lever static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
3289dc3b095SChuck Lever {
3299dc3b095SChuck Lever 	struct sockaddr *sap = xs_addr(xprt);
3309dc3b095SChuck Lever 	char buf[128];
3319dc3b095SChuck Lever 
332c877b849SChuck Lever 	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
333c877b849SChuck Lever 	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
334c877b849SChuck Lever 
335c877b849SChuck Lever 	(void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
336c877b849SChuck Lever 	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
337c877b849SChuck Lever }
338c877b849SChuck Lever 
3399dc3b095SChuck Lever static void xs_format_peer_addresses(struct rpc_xprt *xprt,
340b454ae90SChuck Lever 				     const char *protocol,
341b454ae90SChuck Lever 				     const char *netid)
342edb267a6SChuck Lever {
343b454ae90SChuck Lever 	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
344b454ae90SChuck Lever 	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
345c877b849SChuck Lever 	xs_format_common_peer_addresses(xprt);
3469dc3b095SChuck Lever 	xs_format_common_peer_ports(xprt);
347edb267a6SChuck Lever }
348edb267a6SChuck Lever 
3499dc3b095SChuck Lever static void xs_update_peer_port(struct rpc_xprt *xprt)
3504b6473fbSChuck Lever {
3519dc3b095SChuck Lever 	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
3529dc3b095SChuck Lever 	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
3534b6473fbSChuck Lever 
3549dc3b095SChuck Lever 	xs_format_common_peer_ports(xprt);
355edb267a6SChuck Lever }
356edb267a6SChuck Lever 
357edb267a6SChuck Lever static void xs_free_peer_addresses(struct rpc_xprt *xprt)
358edb267a6SChuck Lever {
35933e01dc7SChuck Lever 	unsigned int i;
36033e01dc7SChuck Lever 
36133e01dc7SChuck Lever 	for (i = 0; i < RPC_DISPLAY_MAX; i++)
36233e01dc7SChuck Lever 		switch (i) {
36333e01dc7SChuck Lever 		case RPC_DISPLAY_PROTO:
36433e01dc7SChuck Lever 		case RPC_DISPLAY_NETID:
36533e01dc7SChuck Lever 			continue;
36633e01dc7SChuck Lever 		default:
36733e01dc7SChuck Lever 			kfree(xprt->address_strings[i]);
36833e01dc7SChuck Lever 		}
369edb267a6SChuck Lever }
370edb267a6SChuck Lever 
371b4b5cc85SChuck Lever #define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
372b4b5cc85SChuck Lever 
37324c5684bSTrond Myklebust static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
374b4b5cc85SChuck Lever {
375b4b5cc85SChuck Lever 	struct msghdr msg = {
376b4b5cc85SChuck Lever 		.msg_name	= addr,
377b4b5cc85SChuck Lever 		.msg_namelen	= addrlen,
37824c5684bSTrond Myklebust 		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
37924c5684bSTrond Myklebust 	};
38024c5684bSTrond Myklebust 	struct kvec iov = {
38124c5684bSTrond Myklebust 		.iov_base	= vec->iov_base + base,
38224c5684bSTrond Myklebust 		.iov_len	= vec->iov_len - base,
383b4b5cc85SChuck Lever 	};
384b4b5cc85SChuck Lever 
38524c5684bSTrond Myklebust 	if (iov.iov_len != 0)
386b4b5cc85SChuck Lever 		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
387b4b5cc85SChuck Lever 	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
388b4b5cc85SChuck Lever }
389b4b5cc85SChuck Lever 
39024c5684bSTrond Myklebust static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
391b4b5cc85SChuck Lever {
39224c5684bSTrond Myklebust 	struct page **ppage;
39324c5684bSTrond Myklebust 	unsigned int remainder;
39424c5684bSTrond Myklebust 	int err, sent = 0;
395b4b5cc85SChuck Lever 
39624c5684bSTrond Myklebust 	remainder = xdr->page_len - base;
39724c5684bSTrond Myklebust 	base += xdr->page_base;
39824c5684bSTrond Myklebust 	ppage = xdr->pages + (base >> PAGE_SHIFT);
39924c5684bSTrond Myklebust 	base &= ~PAGE_MASK;
40024c5684bSTrond Myklebust 	for(;;) {
40124c5684bSTrond Myklebust 		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
40224c5684bSTrond Myklebust 		int flags = XS_SENDMSG_FLAGS;
40324c5684bSTrond Myklebust 
40424c5684bSTrond Myklebust 		remainder -= len;
40524c5684bSTrond Myklebust 		if (remainder != 0 || more)
40624c5684bSTrond Myklebust 			flags |= MSG_MORE;
40724c5684bSTrond Myklebust 		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
40824c5684bSTrond Myklebust 		if (remainder == 0 || err != len)
40924c5684bSTrond Myklebust 			break;
41024c5684bSTrond Myklebust 		sent += err;
41124c5684bSTrond Myklebust 		ppage++;
41224c5684bSTrond Myklebust 		base = 0;
41324c5684bSTrond Myklebust 	}
41424c5684bSTrond Myklebust 	if (sent == 0)
41524c5684bSTrond Myklebust 		return err;
41624c5684bSTrond Myklebust 	if (err > 0)
41724c5684bSTrond Myklebust 		sent += err;
41824c5684bSTrond Myklebust 	return sent;
419b4b5cc85SChuck Lever }
420b4b5cc85SChuck Lever 
4219903cd1cSChuck Lever /**
4229903cd1cSChuck Lever  * xs_sendpages - write pages directly to a socket
4239903cd1cSChuck Lever  * @sock: socket to send on
4249903cd1cSChuck Lever  * @addr: UDP only -- address of destination
4259903cd1cSChuck Lever  * @addrlen: UDP only -- length of destination address
4269903cd1cSChuck Lever  * @xdr: buffer containing this request
4279903cd1cSChuck Lever  * @base: starting position in the buffer
4289903cd1cSChuck Lever  *
429a246b010SChuck Lever  */
43024c5684bSTrond Myklebust static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
431a246b010SChuck Lever {
43224c5684bSTrond Myklebust 	unsigned int remainder = xdr->len - base;
43324c5684bSTrond Myklebust 	int err, sent = 0;
434a246b010SChuck Lever 
435262965f5SChuck Lever 	if (unlikely(!sock))
436fba91afbSTrond Myklebust 		return -ENOTSOCK;
437262965f5SChuck Lever 
438262965f5SChuck Lever 	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
43924c5684bSTrond Myklebust 	if (base != 0) {
44024c5684bSTrond Myklebust 		addr = NULL;
44124c5684bSTrond Myklebust 		addrlen = 0;
44224c5684bSTrond Myklebust 	}
443262965f5SChuck Lever 
44424c5684bSTrond Myklebust 	if (base < xdr->head[0].iov_len || addr != NULL) {
44524c5684bSTrond Myklebust 		unsigned int len = xdr->head[0].iov_len - base;
44624c5684bSTrond Myklebust 		remainder -= len;
44724c5684bSTrond Myklebust 		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
44824c5684bSTrond Myklebust 		if (remainder == 0 || err != len)
449a246b010SChuck Lever 			goto out;
45024c5684bSTrond Myklebust 		sent += err;
451a246b010SChuck Lever 		base = 0;
452a246b010SChuck Lever 	} else
45324c5684bSTrond Myklebust 		base -= xdr->head[0].iov_len;
454a246b010SChuck Lever 
45524c5684bSTrond Myklebust 	if (base < xdr->page_len) {
45624c5684bSTrond Myklebust 		unsigned int len = xdr->page_len - base;
45724c5684bSTrond Myklebust 		remainder -= len;
45824c5684bSTrond Myklebust 		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
45924c5684bSTrond Myklebust 		if (remainder == 0 || err != len)
460a246b010SChuck Lever 			goto out;
46124c5684bSTrond Myklebust 		sent += err;
462a246b010SChuck Lever 		base = 0;
46324c5684bSTrond Myklebust 	} else
46424c5684bSTrond Myklebust 		base -= xdr->page_len;
46524c5684bSTrond Myklebust 
46624c5684bSTrond Myklebust 	if (base >= xdr->tail[0].iov_len)
46724c5684bSTrond Myklebust 		return sent;
46824c5684bSTrond Myklebust 	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
469a246b010SChuck Lever out:
47024c5684bSTrond Myklebust 	if (sent == 0)
47124c5684bSTrond Myklebust 		return err;
47224c5684bSTrond Myklebust 	if (err > 0)
47324c5684bSTrond Myklebust 		sent += err;
47424c5684bSTrond Myklebust 	return sent;
475a246b010SChuck Lever }
476a246b010SChuck Lever 
477b6ddf64fSTrond Myklebust static void xs_nospace_callback(struct rpc_task *task)
478b6ddf64fSTrond Myklebust {
479b6ddf64fSTrond Myklebust 	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
480b6ddf64fSTrond Myklebust 
481b6ddf64fSTrond Myklebust 	transport->inet->sk_write_pending--;
482b6ddf64fSTrond Myklebust 	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
483b6ddf64fSTrond Myklebust }
484b6ddf64fSTrond Myklebust 
4859903cd1cSChuck Lever /**
486262965f5SChuck Lever  * xs_nospace - place task on wait queue if transmit was incomplete
487262965f5SChuck Lever  * @task: task to put to sleep
4889903cd1cSChuck Lever  *
489a246b010SChuck Lever  */
4905e3771ceSTrond Myklebust static int xs_nospace(struct rpc_task *task)
491a246b010SChuck Lever {
492262965f5SChuck Lever 	struct rpc_rqst *req = task->tk_rqstp;
493262965f5SChuck Lever 	struct rpc_xprt *xprt = req->rq_xprt;
494ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
4955e3771ceSTrond Myklebust 	int ret = 0;
496a246b010SChuck Lever 
49746121cf7SChuck Lever 	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
498262965f5SChuck Lever 			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
499262965f5SChuck Lever 			req->rq_slen);
500a246b010SChuck Lever 
501262965f5SChuck Lever 	/* Protect against races with write_space */
502262965f5SChuck Lever 	spin_lock_bh(&xprt->transport_lock);
503a246b010SChuck Lever 
504262965f5SChuck Lever 	/* Don't race with disconnect */
505b6ddf64fSTrond Myklebust 	if (xprt_connected(xprt)) {
506b6ddf64fSTrond Myklebust 		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
5075e3771ceSTrond Myklebust 			ret = -EAGAIN;
508b6ddf64fSTrond Myklebust 			/*
509b6ddf64fSTrond Myklebust 			 * Notify TCP that we're limited by the application
510b6ddf64fSTrond Myklebust 			 * window size
511b6ddf64fSTrond Myklebust 			 */
512b6ddf64fSTrond Myklebust 			set_bit(SOCK_NOSPACE, &transport->sock->flags);
513b6ddf64fSTrond Myklebust 			transport->inet->sk_write_pending++;
514b6ddf64fSTrond Myklebust 			/* ...and wait for more buffer space */
515b6ddf64fSTrond Myklebust 			xprt_wait_for_buffer_space(task, xs_nospace_callback);
516b6ddf64fSTrond Myklebust 		}
517b6ddf64fSTrond Myklebust 	} else {
518b6ddf64fSTrond Myklebust 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
5195e3771ceSTrond Myklebust 		ret = -ENOTCONN;
520b6ddf64fSTrond Myklebust 	}
521a246b010SChuck Lever 
522262965f5SChuck Lever 	spin_unlock_bh(&xprt->transport_lock);
5235e3771ceSTrond Myklebust 	return ret;
524a246b010SChuck Lever }
525a246b010SChuck Lever 
5269903cd1cSChuck Lever /**
527262965f5SChuck Lever  * xs_udp_send_request - write an RPC request to a UDP socket
5289903cd1cSChuck Lever  * @task: address of RPC task that manages the state of an RPC request
5299903cd1cSChuck Lever  *
5309903cd1cSChuck Lever  * Return values:
5319903cd1cSChuck Lever  *        0:	The request has been sent
5329903cd1cSChuck Lever  *   EAGAIN:	The socket was blocked, please call again later to
5339903cd1cSChuck Lever  *		complete the request
534262965f5SChuck Lever  * ENOTCONN:	Caller needs to invoke connect logic then call again
5359903cd1cSChuck Lever  *    other:	Some other error occurred, the request was not sent
5369903cd1cSChuck Lever  */
537262965f5SChuck Lever static int xs_udp_send_request(struct rpc_task *task)
538a246b010SChuck Lever {
539a246b010SChuck Lever 	struct rpc_rqst *req = task->tk_rqstp;
540a246b010SChuck Lever 	struct rpc_xprt *xprt = req->rq_xprt;
541ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
542262965f5SChuck Lever 	struct xdr_buf *xdr = &req->rq_snd_buf;
543262965f5SChuck Lever 	int status;
544262965f5SChuck Lever 
545262965f5SChuck Lever 	xs_pktdump("packet data:",
546262965f5SChuck Lever 				req->rq_svec->iov_base,
547262965f5SChuck Lever 				req->rq_svec->iov_len);
548262965f5SChuck Lever 
54901d37c42STrond Myklebust 	if (!xprt_bound(xprt))
55001d37c42STrond Myklebust 		return -ENOTCONN;
551ee0ac0c2SChuck Lever 	status = xs_sendpages(transport->sock,
55295392c59SChuck Lever 			      xs_addr(xprt),
553ee0ac0c2SChuck Lever 			      xprt->addrlen, xdr,
554ee0ac0c2SChuck Lever 			      req->rq_bytes_sent);
555262965f5SChuck Lever 
556262965f5SChuck Lever 	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
557262965f5SChuck Lever 			xdr->len - req->rq_bytes_sent, status);
558262965f5SChuck Lever 
5592199700fSTrond Myklebust 	if (status >= 0) {
5601321d8d9SChuck Lever 		task->tk_bytes_sent += status;
5612199700fSTrond Myklebust 		if (status >= req->rq_slen)
562262965f5SChuck Lever 			return 0;
563262965f5SChuck Lever 		/* Still some bytes left; set up for a retry later. */
564262965f5SChuck Lever 		status = -EAGAIN;
5652199700fSTrond Myklebust 	}
566c8485e4dSTrond Myklebust 	if (!transport->sock)
567c8485e4dSTrond Myklebust 		goto out;
568262965f5SChuck Lever 
569262965f5SChuck Lever 	switch (status) {
570fba91afbSTrond Myklebust 	case -ENOTSOCK:
571fba91afbSTrond Myklebust 		status = -ENOTCONN;
572fba91afbSTrond Myklebust 		/* Should we call xs_close() here? */
573fba91afbSTrond Myklebust 		break;
574b6ddf64fSTrond Myklebust 	case -EAGAIN:
5755e3771ceSTrond Myklebust 		status = xs_nospace(task);
576b6ddf64fSTrond Myklebust 		break;
577c8485e4dSTrond Myklebust 	default:
578c8485e4dSTrond Myklebust 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
579c8485e4dSTrond Myklebust 			-status);
580262965f5SChuck Lever 	case -ENETUNREACH:
581262965f5SChuck Lever 	case -EPIPE:
582262965f5SChuck Lever 	case -ECONNREFUSED:
583262965f5SChuck Lever 		/* When the server has died, an ICMP port unreachable message
584262965f5SChuck Lever 		 * prompts ECONNREFUSED. */
585b6ddf64fSTrond Myklebust 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
586262965f5SChuck Lever 	}
587c8485e4dSTrond Myklebust out:
588262965f5SChuck Lever 	return status;
589262965f5SChuck Lever }
590262965f5SChuck Lever 
591e06799f9STrond Myklebust /**
592e06799f9STrond Myklebust  * xs_tcp_shutdown - gracefully shut down a TCP socket
593e06799f9STrond Myklebust  * @xprt: transport
594e06799f9STrond Myklebust  *
595e06799f9STrond Myklebust  * Initiates a graceful shutdown of the TCP socket by calling the
596e06799f9STrond Myklebust  * equivalent of shutdown(SHUT_WR);
597e06799f9STrond Myklebust  */
598e06799f9STrond Myklebust static void xs_tcp_shutdown(struct rpc_xprt *xprt)
599e06799f9STrond Myklebust {
600e06799f9STrond Myklebust 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
601e06799f9STrond Myklebust 	struct socket *sock = transport->sock;
602e06799f9STrond Myklebust 
603e06799f9STrond Myklebust 	if (sock != NULL)
604e06799f9STrond Myklebust 		kernel_sock_shutdown(sock, SHUT_WR);
605e06799f9STrond Myklebust }
606e06799f9STrond Myklebust 
607808012fbSChuck Lever static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
608808012fbSChuck Lever {
609808012fbSChuck Lever 	u32 reclen = buf->len - sizeof(rpc_fraghdr);
610808012fbSChuck Lever 	rpc_fraghdr *base = buf->head[0].iov_base;
611808012fbSChuck Lever 	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
612808012fbSChuck Lever }
613808012fbSChuck Lever 
614262965f5SChuck Lever /**
615262965f5SChuck Lever  * xs_tcp_send_request - write an RPC request to a TCP socket
616262965f5SChuck Lever  * @task: address of RPC task that manages the state of an RPC request
617262965f5SChuck Lever  *
618262965f5SChuck Lever  * Return values:
619262965f5SChuck Lever  *        0:	The request has been sent
620262965f5SChuck Lever  *   EAGAIN:	The socket was blocked, please call again later to
621262965f5SChuck Lever  *		complete the request
622262965f5SChuck Lever  * ENOTCONN:	Caller needs to invoke connect logic then call again
623262965f5SChuck Lever  *    other:	Some other error occurred, the request was not sent
624262965f5SChuck Lever  *
625262965f5SChuck Lever  * XXX: In the case of soft timeouts, should we eventually give up
626262965f5SChuck Lever  *	if sendmsg is not able to make progress?
627262965f5SChuck Lever  */
628262965f5SChuck Lever static int xs_tcp_send_request(struct rpc_task *task)
629262965f5SChuck Lever {
630262965f5SChuck Lever 	struct rpc_rqst *req = task->tk_rqstp;
631262965f5SChuck Lever 	struct rpc_xprt *xprt = req->rq_xprt;
632ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
633262965f5SChuck Lever 	struct xdr_buf *xdr = &req->rq_snd_buf;
634b595bb15SChuck Lever 	int status;
635a246b010SChuck Lever 
636808012fbSChuck Lever 	xs_encode_tcp_record_marker(&req->rq_snd_buf);
637262965f5SChuck Lever 
638262965f5SChuck Lever 	xs_pktdump("packet data:",
639262965f5SChuck Lever 				req->rq_svec->iov_base,
640262965f5SChuck Lever 				req->rq_svec->iov_len);
641a246b010SChuck Lever 
642a246b010SChuck Lever 	/* Continue transmitting the packet/record. We must be careful
643a246b010SChuck Lever 	 * to cope with writespace callbacks arriving _after_ we have
644262965f5SChuck Lever 	 * called sendmsg(). */
645a246b010SChuck Lever 	while (1) {
646ee0ac0c2SChuck Lever 		status = xs_sendpages(transport->sock,
647ee0ac0c2SChuck Lever 					NULL, 0, xdr, req->rq_bytes_sent);
648a246b010SChuck Lever 
649262965f5SChuck Lever 		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
650262965f5SChuck Lever 				xdr->len - req->rq_bytes_sent, status);
651262965f5SChuck Lever 
652262965f5SChuck Lever 		if (unlikely(status < 0))
653a246b010SChuck Lever 			break;
654a246b010SChuck Lever 
655a246b010SChuck Lever 		/* If we've sent the entire packet, immediately
656a246b010SChuck Lever 		 * reset the count of bytes sent. */
657262965f5SChuck Lever 		req->rq_bytes_sent += status;
658ef759a2eSChuck Lever 		task->tk_bytes_sent += status;
659262965f5SChuck Lever 		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
660a246b010SChuck Lever 			req->rq_bytes_sent = 0;
661a246b010SChuck Lever 			return 0;
662a246b010SChuck Lever 		}
663262965f5SChuck Lever 
66406b4b681STrond Myklebust 		if (status != 0)
66506b4b681STrond Myklebust 			continue;
666a246b010SChuck Lever 		status = -EAGAIN;
667a246b010SChuck Lever 		break;
668a246b010SChuck Lever 	}
669c8485e4dSTrond Myklebust 	if (!transport->sock)
670c8485e4dSTrond Myklebust 		goto out;
671a246b010SChuck Lever 
672262965f5SChuck Lever 	switch (status) {
673fba91afbSTrond Myklebust 	case -ENOTSOCK:
674fba91afbSTrond Myklebust 		status = -ENOTCONN;
675fba91afbSTrond Myklebust 		/* Should we call xs_close() here? */
676fba91afbSTrond Myklebust 		break;
677262965f5SChuck Lever 	case -EAGAIN:
6785e3771ceSTrond Myklebust 		status = xs_nospace(task);
679262965f5SChuck Lever 		break;
680262965f5SChuck Lever 	default:
681262965f5SChuck Lever 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
682262965f5SChuck Lever 			-status);
683a246b010SChuck Lever 	case -ECONNRESET:
68455420c24STrond Myklebust 	case -EPIPE:
685e06799f9STrond Myklebust 		xs_tcp_shutdown(xprt);
686a246b010SChuck Lever 	case -ECONNREFUSED:
687a246b010SChuck Lever 	case -ENOTCONN:
688a246b010SChuck Lever 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
689a246b010SChuck Lever 	}
690c8485e4dSTrond Myklebust out:
691a246b010SChuck Lever 	return status;
692a246b010SChuck Lever }
693a246b010SChuck Lever 
6949903cd1cSChuck Lever /**
695e0ab53deSTrond Myklebust  * xs_tcp_release_xprt - clean up after a tcp transmission
696e0ab53deSTrond Myklebust  * @xprt: transport
697e0ab53deSTrond Myklebust  * @task: rpc task
698e0ab53deSTrond Myklebust  *
699e0ab53deSTrond Myklebust  * This cleans up if an error causes us to abort the transmission of a request.
700e0ab53deSTrond Myklebust  * In this case, the socket may need to be reset in order to avoid confusing
701e0ab53deSTrond Myklebust  * the server.
702e0ab53deSTrond Myklebust  */
703e0ab53deSTrond Myklebust static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
704e0ab53deSTrond Myklebust {
705e0ab53deSTrond Myklebust 	struct rpc_rqst *req;
706e0ab53deSTrond Myklebust 
707e0ab53deSTrond Myklebust 	if (task != xprt->snd_task)
708e0ab53deSTrond Myklebust 		return;
709e0ab53deSTrond Myklebust 	if (task == NULL)
710e0ab53deSTrond Myklebust 		goto out_release;
711e0ab53deSTrond Myklebust 	req = task->tk_rqstp;
712e0ab53deSTrond Myklebust 	if (req->rq_bytes_sent == 0)
713e0ab53deSTrond Myklebust 		goto out_release;
714e0ab53deSTrond Myklebust 	if (req->rq_bytes_sent == req->rq_snd_buf.len)
715e0ab53deSTrond Myklebust 		goto out_release;
716e0ab53deSTrond Myklebust 	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
717e0ab53deSTrond Myklebust out_release:
718e0ab53deSTrond Myklebust 	xprt_release_xprt(xprt, task);
719e0ab53deSTrond Myklebust }
720e0ab53deSTrond Myklebust 
7212a9e1cfaSTrond Myklebust static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
7222a9e1cfaSTrond Myklebust {
7232a9e1cfaSTrond Myklebust 	transport->old_data_ready = sk->sk_data_ready;
7242a9e1cfaSTrond Myklebust 	transport->old_state_change = sk->sk_state_change;
7252a9e1cfaSTrond Myklebust 	transport->old_write_space = sk->sk_write_space;
7262a9e1cfaSTrond Myklebust 	transport->old_error_report = sk->sk_error_report;
7272a9e1cfaSTrond Myklebust }
7282a9e1cfaSTrond Myklebust 
7292a9e1cfaSTrond Myklebust static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
7302a9e1cfaSTrond Myklebust {
7312a9e1cfaSTrond Myklebust 	sk->sk_data_ready = transport->old_data_ready;
7322a9e1cfaSTrond Myklebust 	sk->sk_state_change = transport->old_state_change;
7332a9e1cfaSTrond Myklebust 	sk->sk_write_space = transport->old_write_space;
7342a9e1cfaSTrond Myklebust 	sk->sk_error_report = transport->old_error_report;
7352a9e1cfaSTrond Myklebust }
7362a9e1cfaSTrond Myklebust 
737fe315e76SChuck Lever static void xs_reset_transport(struct sock_xprt *transport)
738a246b010SChuck Lever {
739ee0ac0c2SChuck Lever 	struct socket *sock = transport->sock;
740ee0ac0c2SChuck Lever 	struct sock *sk = transport->inet;
741a246b010SChuck Lever 
742fe315e76SChuck Lever 	if (sk == NULL)
743fe315e76SChuck Lever 		return;
7449903cd1cSChuck Lever 
745a246b010SChuck Lever 	write_lock_bh(&sk->sk_callback_lock);
746ee0ac0c2SChuck Lever 	transport->inet = NULL;
747ee0ac0c2SChuck Lever 	transport->sock = NULL;
748a246b010SChuck Lever 
749a246b010SChuck Lever 	sk->sk_user_data = NULL;
7502a9e1cfaSTrond Myklebust 
7512a9e1cfaSTrond Myklebust 	xs_restore_old_callbacks(transport, sk);
752a246b010SChuck Lever 	write_unlock_bh(&sk->sk_callback_lock);
753a246b010SChuck Lever 
754a246b010SChuck Lever 	sk->sk_no_check = 0;
755a246b010SChuck Lever 
756a246b010SChuck Lever 	sock_release(sock);
757fe315e76SChuck Lever }
758fe315e76SChuck Lever 
759fe315e76SChuck Lever /**
760fe315e76SChuck Lever  * xs_close - close a socket
761fe315e76SChuck Lever  * @xprt: transport
762fe315e76SChuck Lever  *
763fe315e76SChuck Lever  * This is used when all requests are complete; ie, no DRC state remains
764fe315e76SChuck Lever  * on the server we want to save.
765f75e6745STrond Myklebust  *
766f75e6745STrond Myklebust  * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
767f75e6745STrond Myklebust  * xs_reset_transport() zeroing the socket from underneath a writer.
768fe315e76SChuck Lever  */
769fe315e76SChuck Lever static void xs_close(struct rpc_xprt *xprt)
770fe315e76SChuck Lever {
771fe315e76SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
772fe315e76SChuck Lever 
773fe315e76SChuck Lever 	dprintk("RPC:       xs_close xprt %p\n", xprt);
774fe315e76SChuck Lever 
775fe315e76SChuck Lever 	xs_reset_transport(transport);
776fe315e76SChuck Lever 
777632e3bdcSTrond Myklebust 	smp_mb__before_clear_bit();
7787d1e8255STrond Myklebust 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
779632e3bdcSTrond Myklebust 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
7803b948ae5STrond Myklebust 	clear_bit(XPRT_CLOSING, &xprt->state);
781632e3bdcSTrond Myklebust 	smp_mb__after_clear_bit();
78262da3b24STrond Myklebust 	xprt_disconnect_done(xprt);
783a246b010SChuck Lever }
784a246b010SChuck Lever 
785f75e6745STrond Myklebust static void xs_tcp_close(struct rpc_xprt *xprt)
786f75e6745STrond Myklebust {
787f75e6745STrond Myklebust 	if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
788f75e6745STrond Myklebust 		xs_close(xprt);
789f75e6745STrond Myklebust 	else
790f75e6745STrond Myklebust 		xs_tcp_shutdown(xprt);
791f75e6745STrond Myklebust }
792f75e6745STrond Myklebust 
7939903cd1cSChuck Lever /**
7949903cd1cSChuck Lever  * xs_destroy - prepare to shutdown a transport
7959903cd1cSChuck Lever  * @xprt: doomed transport
7969903cd1cSChuck Lever  *
7979903cd1cSChuck Lever  */
7989903cd1cSChuck Lever static void xs_destroy(struct rpc_xprt *xprt)
799a246b010SChuck Lever {
800c8475461SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
801c8475461SChuck Lever 
8029903cd1cSChuck Lever 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
8039903cd1cSChuck Lever 
804c1384c9cSTrond Myklebust 	cancel_rearming_delayed_work(&transport->connect_worker);
805a246b010SChuck Lever 
8069903cd1cSChuck Lever 	xs_close(xprt);
807edb267a6SChuck Lever 	xs_free_peer_addresses(xprt);
808a246b010SChuck Lever 	kfree(xprt->slot);
809c8541ecdSChuck Lever 	kfree(xprt);
810bc25571eS"Talpey, Thomas" 	module_put(THIS_MODULE);
811a246b010SChuck Lever }
812a246b010SChuck Lever 
8139903cd1cSChuck Lever static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
8149903cd1cSChuck Lever {
8159903cd1cSChuck Lever 	return (struct rpc_xprt *) sk->sk_user_data;
8169903cd1cSChuck Lever }
8179903cd1cSChuck Lever 
8189903cd1cSChuck Lever /**
8199903cd1cSChuck Lever  * xs_udp_data_ready - "data ready" callback for UDP sockets
8209903cd1cSChuck Lever  * @sk: socket with data to read
8219903cd1cSChuck Lever  * @len: how much data to read
8229903cd1cSChuck Lever  *
823a246b010SChuck Lever  */
8249903cd1cSChuck Lever static void xs_udp_data_ready(struct sock *sk, int len)
825a246b010SChuck Lever {
826a246b010SChuck Lever 	struct rpc_task *task;
827a246b010SChuck Lever 	struct rpc_xprt *xprt;
828a246b010SChuck Lever 	struct rpc_rqst *rovr;
829a246b010SChuck Lever 	struct sk_buff *skb;
830a246b010SChuck Lever 	int err, repsize, copied;
831d8ed029dSAlexey Dobriyan 	u32 _xid;
832d8ed029dSAlexey Dobriyan 	__be32 *xp;
833a246b010SChuck Lever 
834a246b010SChuck Lever 	read_lock(&sk->sk_callback_lock);
8359903cd1cSChuck Lever 	dprintk("RPC:       xs_udp_data_ready...\n");
8369903cd1cSChuck Lever 	if (!(xprt = xprt_from_sock(sk)))
837a246b010SChuck Lever 		goto out;
838a246b010SChuck Lever 
839a246b010SChuck Lever 	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
840a246b010SChuck Lever 		goto out;
841a246b010SChuck Lever 
842a246b010SChuck Lever 	if (xprt->shutdown)
843a246b010SChuck Lever 		goto dropit;
844a246b010SChuck Lever 
845a246b010SChuck Lever 	repsize = skb->len - sizeof(struct udphdr);
846a246b010SChuck Lever 	if (repsize < 4) {
8479903cd1cSChuck Lever 		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
848a246b010SChuck Lever 		goto dropit;
849a246b010SChuck Lever 	}
850a246b010SChuck Lever 
851a246b010SChuck Lever 	/* Copy the XID from the skb... */
852a246b010SChuck Lever 	xp = skb_header_pointer(skb, sizeof(struct udphdr),
853a246b010SChuck Lever 				sizeof(_xid), &_xid);
854a246b010SChuck Lever 	if (xp == NULL)
855a246b010SChuck Lever 		goto dropit;
856a246b010SChuck Lever 
857a246b010SChuck Lever 	/* Look up and lock the request corresponding to the given XID */
8584a0f8c04SChuck Lever 	spin_lock(&xprt->transport_lock);
859a246b010SChuck Lever 	rovr = xprt_lookup_rqst(xprt, *xp);
860a246b010SChuck Lever 	if (!rovr)
861a246b010SChuck Lever 		goto out_unlock;
862a246b010SChuck Lever 	task = rovr->rq_task;
863a246b010SChuck Lever 
864a246b010SChuck Lever 	if ((copied = rovr->rq_private_buf.buflen) > repsize)
865a246b010SChuck Lever 		copied = repsize;
866a246b010SChuck Lever 
867a246b010SChuck Lever 	/* Suck it into the iovec, verify checksum if not done by hw. */
8681781f7f5SHerbert Xu 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
8691781f7f5SHerbert Xu 		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
870a246b010SChuck Lever 		goto out_unlock;
8711781f7f5SHerbert Xu 	}
8721781f7f5SHerbert Xu 
8731781f7f5SHerbert Xu 	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
874a246b010SChuck Lever 
875a246b010SChuck Lever 	/* Something worked... */
876adf30907SEric Dumazet 	dst_confirm(skb_dst(skb));
877a246b010SChuck Lever 
8781570c1e4SChuck Lever 	xprt_adjust_cwnd(task, copied);
8791570c1e4SChuck Lever 	xprt_update_rtt(task);
8801570c1e4SChuck Lever 	xprt_complete_rqst(task, copied);
881a246b010SChuck Lever 
882a246b010SChuck Lever  out_unlock:
8834a0f8c04SChuck Lever 	spin_unlock(&xprt->transport_lock);
884a246b010SChuck Lever  dropit:
885a246b010SChuck Lever 	skb_free_datagram(sk, skb);
886a246b010SChuck Lever  out:
887a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
888a246b010SChuck Lever }
889a246b010SChuck Lever 
890dd456471SChuck Lever static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
891a246b010SChuck Lever {
89251971139SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
893a246b010SChuck Lever 	size_t len, used;
894a246b010SChuck Lever 	char *p;
895a246b010SChuck Lever 
89651971139SChuck Lever 	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
89751971139SChuck Lever 	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
8989d292316SChuck Lever 	used = xdr_skb_read_bits(desc, p, len);
89951971139SChuck Lever 	transport->tcp_offset += used;
900a246b010SChuck Lever 	if (used != len)
901a246b010SChuck Lever 		return;
902808012fbSChuck Lever 
90351971139SChuck Lever 	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
90451971139SChuck Lever 	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
905e136d092SChuck Lever 		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
906a246b010SChuck Lever 	else
907e136d092SChuck Lever 		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
90851971139SChuck Lever 	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
909808012fbSChuck Lever 
910e136d092SChuck Lever 	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
91151971139SChuck Lever 	transport->tcp_offset = 0;
912808012fbSChuck Lever 
913a246b010SChuck Lever 	/* Sanity check of the record length */
91418dca02aSRicardo Labiaga 	if (unlikely(transport->tcp_reclen < 8)) {
9159903cd1cSChuck Lever 		dprintk("RPC:       invalid TCP record fragment length\n");
9163ebb067dSTrond Myklebust 		xprt_force_disconnect(xprt);
9179903cd1cSChuck Lever 		return;
918a246b010SChuck Lever 	}
919a246b010SChuck Lever 	dprintk("RPC:       reading TCP record fragment of length %d\n",
92051971139SChuck Lever 			transport->tcp_reclen);
921a246b010SChuck Lever }
922a246b010SChuck Lever 
92351971139SChuck Lever static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
924a246b010SChuck Lever {
92551971139SChuck Lever 	if (transport->tcp_offset == transport->tcp_reclen) {
926e136d092SChuck Lever 		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
92751971139SChuck Lever 		transport->tcp_offset = 0;
928e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
929e136d092SChuck Lever 			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
930e136d092SChuck Lever 			transport->tcp_flags |= TCP_RCV_COPY_XID;
93151971139SChuck Lever 			transport->tcp_copied = 0;
932a246b010SChuck Lever 		}
933a246b010SChuck Lever 	}
934a246b010SChuck Lever }
935a246b010SChuck Lever 
936dd456471SChuck Lever static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
937a246b010SChuck Lever {
938a246b010SChuck Lever 	size_t len, used;
939a246b010SChuck Lever 	char *p;
940a246b010SChuck Lever 
94151971139SChuck Lever 	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
942a246b010SChuck Lever 	dprintk("RPC:       reading XID (%Zu bytes)\n", len);
94351971139SChuck Lever 	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
9449d292316SChuck Lever 	used = xdr_skb_read_bits(desc, p, len);
94551971139SChuck Lever 	transport->tcp_offset += used;
946a246b010SChuck Lever 	if (used != len)
947a246b010SChuck Lever 		return;
948e136d092SChuck Lever 	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
949f4a2e418SRicardo Labiaga 	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
95051971139SChuck Lever 	transport->tcp_copied = 4;
95118dca02aSRicardo Labiaga 	dprintk("RPC:       reading %s XID %08x\n",
95218dca02aSRicardo Labiaga 			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
95318dca02aSRicardo Labiaga 							      : "request with",
95451971139SChuck Lever 			ntohl(transport->tcp_xid));
95551971139SChuck Lever 	xs_tcp_check_fraghdr(transport);
956a246b010SChuck Lever }
957a246b010SChuck Lever 
95818dca02aSRicardo Labiaga static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
95918dca02aSRicardo Labiaga 				       struct xdr_skb_reader *desc)
960a246b010SChuck Lever {
96118dca02aSRicardo Labiaga 	size_t len, used;
96218dca02aSRicardo Labiaga 	u32 offset;
96318dca02aSRicardo Labiaga 	__be32	calldir;
96418dca02aSRicardo Labiaga 
96518dca02aSRicardo Labiaga 	/*
96618dca02aSRicardo Labiaga 	 * We want transport->tcp_offset to be 8 at the end of this routine
96718dca02aSRicardo Labiaga 	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
96818dca02aSRicardo Labiaga 	 * When this function is called for the first time,
96918dca02aSRicardo Labiaga 	 * transport->tcp_offset is 4 (after having already read the xid).
97018dca02aSRicardo Labiaga 	 */
97118dca02aSRicardo Labiaga 	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
97218dca02aSRicardo Labiaga 	len = sizeof(calldir) - offset;
97318dca02aSRicardo Labiaga 	dprintk("RPC:       reading CALL/REPLY flag (%Zu bytes)\n", len);
97418dca02aSRicardo Labiaga 	used = xdr_skb_read_bits(desc, &calldir, len);
97518dca02aSRicardo Labiaga 	transport->tcp_offset += used;
97618dca02aSRicardo Labiaga 	if (used != len)
97718dca02aSRicardo Labiaga 		return;
978f4a2e418SRicardo Labiaga 	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
979f4a2e418SRicardo Labiaga 	transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
98018dca02aSRicardo Labiaga 	transport->tcp_flags |= TCP_RCV_COPY_DATA;
981f4a2e418SRicardo Labiaga 	/*
982f4a2e418SRicardo Labiaga 	 * We don't yet have the XDR buffer, so we will write the calldir
983f4a2e418SRicardo Labiaga 	 * out after we get the buffer from the 'struct rpc_rqst'
984f4a2e418SRicardo Labiaga 	 */
98518dca02aSRicardo Labiaga 	if (ntohl(calldir) == RPC_REPLY)
98618dca02aSRicardo Labiaga 		transport->tcp_flags |= TCP_RPC_REPLY;
98718dca02aSRicardo Labiaga 	else
98818dca02aSRicardo Labiaga 		transport->tcp_flags &= ~TCP_RPC_REPLY;
98918dca02aSRicardo Labiaga 	dprintk("RPC:       reading %s CALL/REPLY flag %08x\n",
99018dca02aSRicardo Labiaga 			(transport->tcp_flags & TCP_RPC_REPLY) ?
99118dca02aSRicardo Labiaga 				"reply for" : "request with", calldir);
99218dca02aSRicardo Labiaga 	xs_tcp_check_fraghdr(transport);
99318dca02aSRicardo Labiaga }
99418dca02aSRicardo Labiaga 
99544b98efdSRicardo Labiaga static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
99644b98efdSRicardo Labiaga 				     struct xdr_skb_reader *desc,
99744b98efdSRicardo Labiaga 				     struct rpc_rqst *req)
998a246b010SChuck Lever {
99944b98efdSRicardo Labiaga 	struct sock_xprt *transport =
100044b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
1001a246b010SChuck Lever 	struct xdr_buf *rcvbuf;
1002a246b010SChuck Lever 	size_t len;
1003a246b010SChuck Lever 	ssize_t r;
1004a246b010SChuck Lever 
1005a246b010SChuck Lever 	rcvbuf = &req->rq_private_buf;
1006f4a2e418SRicardo Labiaga 
1007f4a2e418SRicardo Labiaga 	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1008f4a2e418SRicardo Labiaga 		/*
1009f4a2e418SRicardo Labiaga 		 * Save the RPC direction in the XDR buffer
1010f4a2e418SRicardo Labiaga 		 */
1011f4a2e418SRicardo Labiaga 		__be32	calldir = transport->tcp_flags & TCP_RPC_REPLY ?
1012f4a2e418SRicardo Labiaga 					htonl(RPC_REPLY) : 0;
1013f4a2e418SRicardo Labiaga 
1014f4a2e418SRicardo Labiaga 		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1015f4a2e418SRicardo Labiaga 			&calldir, sizeof(calldir));
1016f4a2e418SRicardo Labiaga 		transport->tcp_copied += sizeof(calldir);
1017f4a2e418SRicardo Labiaga 		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1018a246b010SChuck Lever 	}
1019a246b010SChuck Lever 
1020a246b010SChuck Lever 	len = desc->count;
102151971139SChuck Lever 	if (len > transport->tcp_reclen - transport->tcp_offset) {
1022dd456471SChuck Lever 		struct xdr_skb_reader my_desc;
1023a246b010SChuck Lever 
102451971139SChuck Lever 		len = transport->tcp_reclen - transport->tcp_offset;
1025a246b010SChuck Lever 		memcpy(&my_desc, desc, sizeof(my_desc));
1026a246b010SChuck Lever 		my_desc.count = len;
102751971139SChuck Lever 		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
10289d292316SChuck Lever 					  &my_desc, xdr_skb_read_bits);
1029a246b010SChuck Lever 		desc->count -= r;
1030a246b010SChuck Lever 		desc->offset += r;
1031a246b010SChuck Lever 	} else
103251971139SChuck Lever 		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
10339d292316SChuck Lever 					  desc, xdr_skb_read_bits);
1034a246b010SChuck Lever 
1035a246b010SChuck Lever 	if (r > 0) {
103651971139SChuck Lever 		transport->tcp_copied += r;
103751971139SChuck Lever 		transport->tcp_offset += r;
1038a246b010SChuck Lever 	}
1039a246b010SChuck Lever 	if (r != len) {
1040a246b010SChuck Lever 		/* Error when copying to the receive buffer,
1041a246b010SChuck Lever 		 * usually because we weren't able to allocate
1042a246b010SChuck Lever 		 * additional buffer pages. All we can do now
1043e136d092SChuck Lever 		 * is turn off TCP_RCV_COPY_DATA, so the request
1044a246b010SChuck Lever 		 * will not receive any additional updates,
1045a246b010SChuck Lever 		 * and time out.
1046a246b010SChuck Lever 		 * Any remaining data from this record will
1047a246b010SChuck Lever 		 * be discarded.
1048a246b010SChuck Lever 		 */
1049e136d092SChuck Lever 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1050a246b010SChuck Lever 		dprintk("RPC:       XID %08x truncated request\n",
105151971139SChuck Lever 				ntohl(transport->tcp_xid));
105246121cf7SChuck Lever 		dprintk("RPC:       xprt = %p, tcp_copied = %lu, "
105346121cf7SChuck Lever 				"tcp_offset = %u, tcp_reclen = %u\n",
105446121cf7SChuck Lever 				xprt, transport->tcp_copied,
105546121cf7SChuck Lever 				transport->tcp_offset, transport->tcp_reclen);
105644b98efdSRicardo Labiaga 		return;
1057a246b010SChuck Lever 	}
1058a246b010SChuck Lever 
1059a246b010SChuck Lever 	dprintk("RPC:       XID %08x read %Zd bytes\n",
106051971139SChuck Lever 			ntohl(transport->tcp_xid), r);
106146121cf7SChuck Lever 	dprintk("RPC:       xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
106246121cf7SChuck Lever 			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
106346121cf7SChuck Lever 			transport->tcp_offset, transport->tcp_reclen);
1064a246b010SChuck Lever 
106551971139SChuck Lever 	if (transport->tcp_copied == req->rq_private_buf.buflen)
1066e136d092SChuck Lever 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
106751971139SChuck Lever 	else if (transport->tcp_offset == transport->tcp_reclen) {
1068e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1069e136d092SChuck Lever 			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1070a246b010SChuck Lever 	}
1071a246b010SChuck Lever 
107244b98efdSRicardo Labiaga 	return;
107344b98efdSRicardo Labiaga }
107444b98efdSRicardo Labiaga 
107544b98efdSRicardo Labiaga /*
107644b98efdSRicardo Labiaga  * Finds the request corresponding to the RPC xid and invokes the common
107744b98efdSRicardo Labiaga  * tcp read code to read the data.
107844b98efdSRicardo Labiaga  */
107944b98efdSRicardo Labiaga static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
108044b98efdSRicardo Labiaga 				    struct xdr_skb_reader *desc)
108144b98efdSRicardo Labiaga {
108244b98efdSRicardo Labiaga 	struct sock_xprt *transport =
108344b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
108444b98efdSRicardo Labiaga 	struct rpc_rqst *req;
108544b98efdSRicardo Labiaga 
108644b98efdSRicardo Labiaga 	dprintk("RPC:       read reply XID %08x\n", ntohl(transport->tcp_xid));
108744b98efdSRicardo Labiaga 
108844b98efdSRicardo Labiaga 	/* Find and lock the request corresponding to this xid */
108944b98efdSRicardo Labiaga 	spin_lock(&xprt->transport_lock);
109044b98efdSRicardo Labiaga 	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
109144b98efdSRicardo Labiaga 	if (!req) {
109244b98efdSRicardo Labiaga 		dprintk("RPC:       XID %08x request not found!\n",
109344b98efdSRicardo Labiaga 				ntohl(transport->tcp_xid));
109444b98efdSRicardo Labiaga 		spin_unlock(&xprt->transport_lock);
109544b98efdSRicardo Labiaga 		return -1;
109644b98efdSRicardo Labiaga 	}
109744b98efdSRicardo Labiaga 
109844b98efdSRicardo Labiaga 	xs_tcp_read_common(xprt, desc, req);
109944b98efdSRicardo Labiaga 
1100e136d092SChuck Lever 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
110151971139SChuck Lever 		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
110244b98efdSRicardo Labiaga 
11034a0f8c04SChuck Lever 	spin_unlock(&xprt->transport_lock);
110444b98efdSRicardo Labiaga 	return 0;
110544b98efdSRicardo Labiaga }
110644b98efdSRicardo Labiaga 
110744b98efdSRicardo Labiaga #if defined(CONFIG_NFS_V4_1)
110844b98efdSRicardo Labiaga /*
110944b98efdSRicardo Labiaga  * Obtains an rpc_rqst previously allocated and invokes the common
111044b98efdSRicardo Labiaga  * tcp read code to read the data.  The result is placed in the callback
111144b98efdSRicardo Labiaga  * queue.
111244b98efdSRicardo Labiaga  * If we're unable to obtain the rpc_rqst we schedule the closing of the
111344b98efdSRicardo Labiaga  * connection and return -1.
111444b98efdSRicardo Labiaga  */
111544b98efdSRicardo Labiaga static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
111644b98efdSRicardo Labiaga 				       struct xdr_skb_reader *desc)
111744b98efdSRicardo Labiaga {
111844b98efdSRicardo Labiaga 	struct sock_xprt *transport =
111944b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
112044b98efdSRicardo Labiaga 	struct rpc_rqst *req;
112144b98efdSRicardo Labiaga 
112244b98efdSRicardo Labiaga 	req = xprt_alloc_bc_request(xprt);
112344b98efdSRicardo Labiaga 	if (req == NULL) {
112444b98efdSRicardo Labiaga 		printk(KERN_WARNING "Callback slot table overflowed\n");
112544b98efdSRicardo Labiaga 		xprt_force_disconnect(xprt);
112644b98efdSRicardo Labiaga 		return -1;
112744b98efdSRicardo Labiaga 	}
112844b98efdSRicardo Labiaga 
112944b98efdSRicardo Labiaga 	req->rq_xid = transport->tcp_xid;
113044b98efdSRicardo Labiaga 	dprintk("RPC:       read callback  XID %08x\n", ntohl(req->rq_xid));
113144b98efdSRicardo Labiaga 	xs_tcp_read_common(xprt, desc, req);
113244b98efdSRicardo Labiaga 
113344b98efdSRicardo Labiaga 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
113444b98efdSRicardo Labiaga 		struct svc_serv *bc_serv = xprt->bc_serv;
113544b98efdSRicardo Labiaga 
113644b98efdSRicardo Labiaga 		/*
113744b98efdSRicardo Labiaga 		 * Add callback request to callback list.  The callback
113844b98efdSRicardo Labiaga 		 * service sleeps on the sv_cb_waitq waiting for new
113944b98efdSRicardo Labiaga 		 * requests.  Wake it up after enqueuing the
114044b98efdSRicardo Labiaga 		 * request.
114144b98efdSRicardo Labiaga 		 */
114244b98efdSRicardo Labiaga 		dprintk("RPC:       add callback request to list\n");
114344b98efdSRicardo Labiaga 		spin_lock(&bc_serv->sv_cb_lock);
114444b98efdSRicardo Labiaga 		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
114544b98efdSRicardo Labiaga 		spin_unlock(&bc_serv->sv_cb_lock);
114644b98efdSRicardo Labiaga 		wake_up(&bc_serv->sv_cb_waitq);
114744b98efdSRicardo Labiaga 	}
114844b98efdSRicardo Labiaga 
114944b98efdSRicardo Labiaga 	req->rq_private_buf.len = transport->tcp_copied;
115044b98efdSRicardo Labiaga 
115144b98efdSRicardo Labiaga 	return 0;
115244b98efdSRicardo Labiaga }
115344b98efdSRicardo Labiaga 
115444b98efdSRicardo Labiaga static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
115544b98efdSRicardo Labiaga 					struct xdr_skb_reader *desc)
115644b98efdSRicardo Labiaga {
115744b98efdSRicardo Labiaga 	struct sock_xprt *transport =
115844b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
115944b98efdSRicardo Labiaga 
116044b98efdSRicardo Labiaga 	return (transport->tcp_flags & TCP_RPC_REPLY) ?
116144b98efdSRicardo Labiaga 		xs_tcp_read_reply(xprt, desc) :
116244b98efdSRicardo Labiaga 		xs_tcp_read_callback(xprt, desc);
116344b98efdSRicardo Labiaga }
116444b98efdSRicardo Labiaga #else
116544b98efdSRicardo Labiaga static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
116644b98efdSRicardo Labiaga 					struct xdr_skb_reader *desc)
116744b98efdSRicardo Labiaga {
116844b98efdSRicardo Labiaga 	return xs_tcp_read_reply(xprt, desc);
116944b98efdSRicardo Labiaga }
117044b98efdSRicardo Labiaga #endif /* CONFIG_NFS_V4_1 */
117144b98efdSRicardo Labiaga 
117244b98efdSRicardo Labiaga /*
117344b98efdSRicardo Labiaga  * Read data off the transport.  This can be either an RPC_CALL or an
117444b98efdSRicardo Labiaga  * RPC_REPLY.  Relay the processing to helper functions.
117544b98efdSRicardo Labiaga  */
117644b98efdSRicardo Labiaga static void xs_tcp_read_data(struct rpc_xprt *xprt,
117744b98efdSRicardo Labiaga 				    struct xdr_skb_reader *desc)
117844b98efdSRicardo Labiaga {
117944b98efdSRicardo Labiaga 	struct sock_xprt *transport =
118044b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
118144b98efdSRicardo Labiaga 
118244b98efdSRicardo Labiaga 	if (_xs_tcp_read_data(xprt, desc) == 0)
118351971139SChuck Lever 		xs_tcp_check_fraghdr(transport);
118444b98efdSRicardo Labiaga 	else {
118544b98efdSRicardo Labiaga 		/*
118644b98efdSRicardo Labiaga 		 * The transport_lock protects the request handling.
118744b98efdSRicardo Labiaga 		 * There's no need to hold it to update the tcp_flags.
118844b98efdSRicardo Labiaga 		 */
118944b98efdSRicardo Labiaga 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
119044b98efdSRicardo Labiaga 	}
1191a246b010SChuck Lever }
1192a246b010SChuck Lever 
1193dd456471SChuck Lever static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1194a246b010SChuck Lever {
1195a246b010SChuck Lever 	size_t len;
1196a246b010SChuck Lever 
119751971139SChuck Lever 	len = transport->tcp_reclen - transport->tcp_offset;
1198a246b010SChuck Lever 	if (len > desc->count)
1199a246b010SChuck Lever 		len = desc->count;
1200a246b010SChuck Lever 	desc->count -= len;
1201a246b010SChuck Lever 	desc->offset += len;
120251971139SChuck Lever 	transport->tcp_offset += len;
1203a246b010SChuck Lever 	dprintk("RPC:       discarded %Zu bytes\n", len);
120451971139SChuck Lever 	xs_tcp_check_fraghdr(transport);
1205a246b010SChuck Lever }
1206a246b010SChuck Lever 
12079903cd1cSChuck Lever static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1208a246b010SChuck Lever {
1209a246b010SChuck Lever 	struct rpc_xprt *xprt = rd_desc->arg.data;
121051971139SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1211dd456471SChuck Lever 	struct xdr_skb_reader desc = {
1212a246b010SChuck Lever 		.skb	= skb,
1213a246b010SChuck Lever 		.offset	= offset,
1214a246b010SChuck Lever 		.count	= len,
1215a246b010SChuck Lever 	};
1216a246b010SChuck Lever 
12179903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_data_recv started\n");
1218a246b010SChuck Lever 	do {
1219a246b010SChuck Lever 		/* Read in a new fragment marker if necessary */
1220a246b010SChuck Lever 		/* Can we ever really expect to get completely empty fragments? */
1221e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
12229903cd1cSChuck Lever 			xs_tcp_read_fraghdr(xprt, &desc);
1223a246b010SChuck Lever 			continue;
1224a246b010SChuck Lever 		}
1225a246b010SChuck Lever 		/* Read in the xid if necessary */
1226e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
122751971139SChuck Lever 			xs_tcp_read_xid(transport, &desc);
1228a246b010SChuck Lever 			continue;
1229a246b010SChuck Lever 		}
123018dca02aSRicardo Labiaga 		/* Read in the call/reply flag */
1231f4a2e418SRicardo Labiaga 		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
123218dca02aSRicardo Labiaga 			xs_tcp_read_calldir(transport, &desc);
123318dca02aSRicardo Labiaga 			continue;
123418dca02aSRicardo Labiaga 		}
1235a246b010SChuck Lever 		/* Read in the request data */
1236e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
123744b98efdSRicardo Labiaga 			xs_tcp_read_data(xprt, &desc);
1238a246b010SChuck Lever 			continue;
1239a246b010SChuck Lever 		}
1240a246b010SChuck Lever 		/* Skip over any trailing bytes on short reads */
124151971139SChuck Lever 		xs_tcp_read_discard(transport, &desc);
1242a246b010SChuck Lever 	} while (desc.count);
12439903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_data_recv done\n");
1244a246b010SChuck Lever 	return len - desc.count;
1245a246b010SChuck Lever }
1246a246b010SChuck Lever 
12479903cd1cSChuck Lever /**
12489903cd1cSChuck Lever  * xs_tcp_data_ready - "data ready" callback for TCP sockets
12499903cd1cSChuck Lever  * @sk: socket with data to read
12509903cd1cSChuck Lever  * @bytes: how much data to read
12519903cd1cSChuck Lever  *
12529903cd1cSChuck Lever  */
12539903cd1cSChuck Lever static void xs_tcp_data_ready(struct sock *sk, int bytes)
1254a246b010SChuck Lever {
1255a246b010SChuck Lever 	struct rpc_xprt *xprt;
1256a246b010SChuck Lever 	read_descriptor_t rd_desc;
1257ff2d7db8STrond Myklebust 	int read;
1258a246b010SChuck Lever 
12599903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_data_ready...\n");
126046121cf7SChuck Lever 
126146121cf7SChuck Lever 	read_lock(&sk->sk_callback_lock);
12629903cd1cSChuck Lever 	if (!(xprt = xprt_from_sock(sk)))
1263a246b010SChuck Lever 		goto out;
1264a246b010SChuck Lever 	if (xprt->shutdown)
1265a246b010SChuck Lever 		goto out;
1266a246b010SChuck Lever 
12679903cd1cSChuck Lever 	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1268a246b010SChuck Lever 	rd_desc.arg.data = xprt;
1269ff2d7db8STrond Myklebust 	do {
1270a246b010SChuck Lever 		rd_desc.count = 65536;
1271ff2d7db8STrond Myklebust 		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1272ff2d7db8STrond Myklebust 	} while (read > 0);
1273a246b010SChuck Lever out:
1274a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1275a246b010SChuck Lever }
1276a246b010SChuck Lever 
12777d1e8255STrond Myklebust /*
12787d1e8255STrond Myklebust  * Do the equivalent of linger/linger2 handling for dealing with
12797d1e8255STrond Myklebust  * broken servers that don't close the socket in a timely
12807d1e8255STrond Myklebust  * fashion
12817d1e8255STrond Myklebust  */
12827d1e8255STrond Myklebust static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
12837d1e8255STrond Myklebust 		unsigned long timeout)
12847d1e8255STrond Myklebust {
12857d1e8255STrond Myklebust 	struct sock_xprt *transport;
12867d1e8255STrond Myklebust 
12877d1e8255STrond Myklebust 	if (xprt_test_and_set_connecting(xprt))
12887d1e8255STrond Myklebust 		return;
12897d1e8255STrond Myklebust 	set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
12907d1e8255STrond Myklebust 	transport = container_of(xprt, struct sock_xprt, xprt);
12917d1e8255STrond Myklebust 	queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
12927d1e8255STrond Myklebust 			   timeout);
12937d1e8255STrond Myklebust }
12947d1e8255STrond Myklebust 
12957d1e8255STrond Myklebust static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
12967d1e8255STrond Myklebust {
12977d1e8255STrond Myklebust 	struct sock_xprt *transport;
12987d1e8255STrond Myklebust 
12997d1e8255STrond Myklebust 	transport = container_of(xprt, struct sock_xprt, xprt);
13007d1e8255STrond Myklebust 
13017d1e8255STrond Myklebust 	if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
13027d1e8255STrond Myklebust 	    !cancel_delayed_work(&transport->connect_worker))
13037d1e8255STrond Myklebust 		return;
13047d1e8255STrond Myklebust 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
13057d1e8255STrond Myklebust 	xprt_clear_connecting(xprt);
13067d1e8255STrond Myklebust }
13077d1e8255STrond Myklebust 
13087d1e8255STrond Myklebust static void xs_sock_mark_closed(struct rpc_xprt *xprt)
13097d1e8255STrond Myklebust {
13107d1e8255STrond Myklebust 	smp_mb__before_clear_bit();
13117d1e8255STrond Myklebust 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
13127d1e8255STrond Myklebust 	clear_bit(XPRT_CLOSING, &xprt->state);
13137d1e8255STrond Myklebust 	smp_mb__after_clear_bit();
13147d1e8255STrond Myklebust 	/* Mark transport as closed and wake up all pending tasks */
13157d1e8255STrond Myklebust 	xprt_disconnect_done(xprt);
13167d1e8255STrond Myklebust }
13177d1e8255STrond Myklebust 
13189903cd1cSChuck Lever /**
13199903cd1cSChuck Lever  * xs_tcp_state_change - callback to handle TCP socket state changes
13209903cd1cSChuck Lever  * @sk: socket whose state has changed
13219903cd1cSChuck Lever  *
13229903cd1cSChuck Lever  */
13239903cd1cSChuck Lever static void xs_tcp_state_change(struct sock *sk)
1324a246b010SChuck Lever {
1325a246b010SChuck Lever 	struct rpc_xprt *xprt;
1326a246b010SChuck Lever 
1327a246b010SChuck Lever 	read_lock(&sk->sk_callback_lock);
1328a246b010SChuck Lever 	if (!(xprt = xprt_from_sock(sk)))
1329a246b010SChuck Lever 		goto out;
13309903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
1331a246b010SChuck Lever 	dprintk("RPC:       state %x conn %d dead %d zapped %d\n",
1332a246b010SChuck Lever 			sk->sk_state, xprt_connected(xprt),
1333a246b010SChuck Lever 			sock_flag(sk, SOCK_DEAD),
1334a246b010SChuck Lever 			sock_flag(sk, SOCK_ZAPPED));
1335a246b010SChuck Lever 
1336a246b010SChuck Lever 	switch (sk->sk_state) {
1337a246b010SChuck Lever 	case TCP_ESTABLISHED:
13384a0f8c04SChuck Lever 		spin_lock_bh(&xprt->transport_lock);
1339a246b010SChuck Lever 		if (!xprt_test_and_set_connected(xprt)) {
134051971139SChuck Lever 			struct sock_xprt *transport = container_of(xprt,
134151971139SChuck Lever 					struct sock_xprt, xprt);
134251971139SChuck Lever 
1343a246b010SChuck Lever 			/* Reset TCP record info */
134451971139SChuck Lever 			transport->tcp_offset = 0;
134551971139SChuck Lever 			transport->tcp_reclen = 0;
134651971139SChuck Lever 			transport->tcp_copied = 0;
1347e136d092SChuck Lever 			transport->tcp_flags =
1348e136d092SChuck Lever 				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
134951971139SChuck Lever 
13502a491991STrond Myklebust 			xprt_wake_pending_tasks(xprt, -EAGAIN);
1351a246b010SChuck Lever 		}
13524a0f8c04SChuck Lever 		spin_unlock_bh(&xprt->transport_lock);
1353a246b010SChuck Lever 		break;
13543b948ae5STrond Myklebust 	case TCP_FIN_WAIT1:
13553b948ae5STrond Myklebust 		/* The client initiated a shutdown of the socket */
13567c1d71cfSTrond Myklebust 		xprt->connect_cookie++;
1357663b8858STrond Myklebust 		xprt->reestablish_timeout = 0;
13583b948ae5STrond Myklebust 		set_bit(XPRT_CLOSING, &xprt->state);
13593b948ae5STrond Myklebust 		smp_mb__before_clear_bit();
13603b948ae5STrond Myklebust 		clear_bit(XPRT_CONNECTED, &xprt->state);
1361ef803670STrond Myklebust 		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
13623b948ae5STrond Myklebust 		smp_mb__after_clear_bit();
136325fe6142STrond Myklebust 		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1364a246b010SChuck Lever 		break;
1365632e3bdcSTrond Myklebust 	case TCP_CLOSE_WAIT:
13663b948ae5STrond Myklebust 		/* The server initiated a shutdown of the socket */
136766af1e55STrond Myklebust 		xprt_force_disconnect(xprt);
1368663b8858STrond Myklebust 	case TCP_SYN_SENT:
13697c1d71cfSTrond Myklebust 		xprt->connect_cookie++;
1370663b8858STrond Myklebust 	case TCP_CLOSING:
1371663b8858STrond Myklebust 		/*
1372663b8858STrond Myklebust 		 * If the server closed down the connection, make sure that
1373663b8858STrond Myklebust 		 * we back off before reconnecting
1374663b8858STrond Myklebust 		 */
1375663b8858STrond Myklebust 		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1376663b8858STrond Myklebust 			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
13773b948ae5STrond Myklebust 		break;
13783b948ae5STrond Myklebust 	case TCP_LAST_ACK:
1379670f9457STrond Myklebust 		set_bit(XPRT_CLOSING, &xprt->state);
138025fe6142STrond Myklebust 		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
13813b948ae5STrond Myklebust 		smp_mb__before_clear_bit();
13823b948ae5STrond Myklebust 		clear_bit(XPRT_CONNECTED, &xprt->state);
13833b948ae5STrond Myklebust 		smp_mb__after_clear_bit();
13843b948ae5STrond Myklebust 		break;
13853b948ae5STrond Myklebust 	case TCP_CLOSE:
13867d1e8255STrond Myklebust 		xs_tcp_cancel_linger_timeout(xprt);
13877d1e8255STrond Myklebust 		xs_sock_mark_closed(xprt);
1388a246b010SChuck Lever 	}
1389a246b010SChuck Lever  out:
1390a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1391a246b010SChuck Lever }
1392a246b010SChuck Lever 
13939903cd1cSChuck Lever /**
1394482f32e6STrond Myklebust  * xs_error_report - callback mainly for catching socket errors
13952a9e1cfaSTrond Myklebust  * @sk: socket
13962a9e1cfaSTrond Myklebust  */
1397482f32e6STrond Myklebust static void xs_error_report(struct sock *sk)
13982a9e1cfaSTrond Myklebust {
13992a9e1cfaSTrond Myklebust 	struct rpc_xprt *xprt;
14002a9e1cfaSTrond Myklebust 
14012a9e1cfaSTrond Myklebust 	read_lock(&sk->sk_callback_lock);
14022a9e1cfaSTrond Myklebust 	if (!(xprt = xprt_from_sock(sk)))
14032a9e1cfaSTrond Myklebust 		goto out;
14042a9e1cfaSTrond Myklebust 	dprintk("RPC:       %s client %p...\n"
14052a9e1cfaSTrond Myklebust 			"RPC:       error %d\n",
14062a9e1cfaSTrond Myklebust 			__func__, xprt, sk->sk_err);
1407482f32e6STrond Myklebust 	xprt_wake_pending_tasks(xprt, -EAGAIN);
14082a9e1cfaSTrond Myklebust out:
14092a9e1cfaSTrond Myklebust 	read_unlock(&sk->sk_callback_lock);
14102a9e1cfaSTrond Myklebust }
14112a9e1cfaSTrond Myklebust 
14121f0fa154SIlpo Järvinen static void xs_write_space(struct sock *sk)
14131f0fa154SIlpo Järvinen {
14141f0fa154SIlpo Järvinen 	struct socket *sock;
14151f0fa154SIlpo Järvinen 	struct rpc_xprt *xprt;
14161f0fa154SIlpo Järvinen 
14171f0fa154SIlpo Järvinen 	if (unlikely(!(sock = sk->sk_socket)))
14181f0fa154SIlpo Järvinen 		return;
14191f0fa154SIlpo Järvinen 	clear_bit(SOCK_NOSPACE, &sock->flags);
14201f0fa154SIlpo Järvinen 
14211f0fa154SIlpo Järvinen 	if (unlikely(!(xprt = xprt_from_sock(sk))))
14221f0fa154SIlpo Järvinen 		return;
14231f0fa154SIlpo Järvinen 	if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
14241f0fa154SIlpo Järvinen 		return;
14251f0fa154SIlpo Järvinen 
14261f0fa154SIlpo Järvinen 	xprt_write_space(xprt);
14271f0fa154SIlpo Järvinen }
14281f0fa154SIlpo Järvinen 
14292a9e1cfaSTrond Myklebust /**
1430c7b2cae8SChuck Lever  * xs_udp_write_space - callback invoked when socket buffer space
1431c7b2cae8SChuck Lever  *                             becomes available
14329903cd1cSChuck Lever  * @sk: socket whose state has changed
14339903cd1cSChuck Lever  *
1434a246b010SChuck Lever  * Called when more output buffer space is available for this socket.
1435a246b010SChuck Lever  * We try not to wake our writers until they can make "significant"
1436c7b2cae8SChuck Lever  * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1437a246b010SChuck Lever  * with a bunch of small requests.
1438a246b010SChuck Lever  */
1439c7b2cae8SChuck Lever static void xs_udp_write_space(struct sock *sk)
1440a246b010SChuck Lever {
1441a246b010SChuck Lever 	read_lock(&sk->sk_callback_lock);
1442c7b2cae8SChuck Lever 
1443c7b2cae8SChuck Lever 	/* from net/core/sock.c:sock_def_write_space */
14441f0fa154SIlpo Järvinen 	if (sock_writeable(sk))
14451f0fa154SIlpo Järvinen 		xs_write_space(sk);
1446c7b2cae8SChuck Lever 
1447c7b2cae8SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1448c7b2cae8SChuck Lever }
1449c7b2cae8SChuck Lever 
1450c7b2cae8SChuck Lever /**
1451c7b2cae8SChuck Lever  * xs_tcp_write_space - callback invoked when socket buffer space
1452c7b2cae8SChuck Lever  *                             becomes available
1453c7b2cae8SChuck Lever  * @sk: socket whose state has changed
1454c7b2cae8SChuck Lever  *
1455c7b2cae8SChuck Lever  * Called when more output buffer space is available for this socket.
1456c7b2cae8SChuck Lever  * We try not to wake our writers until they can make "significant"
1457c7b2cae8SChuck Lever  * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1458c7b2cae8SChuck Lever  * with a bunch of small requests.
1459c7b2cae8SChuck Lever  */
1460c7b2cae8SChuck Lever static void xs_tcp_write_space(struct sock *sk)
1461c7b2cae8SChuck Lever {
1462c7b2cae8SChuck Lever 	read_lock(&sk->sk_callback_lock);
1463c7b2cae8SChuck Lever 
1464c7b2cae8SChuck Lever 	/* from net/core/stream.c:sk_stream_write_space */
14651f0fa154SIlpo Järvinen 	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
14661f0fa154SIlpo Järvinen 		xs_write_space(sk);
1467c7b2cae8SChuck Lever 
1468a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1469a246b010SChuck Lever }
1470a246b010SChuck Lever 
1471470056c2SChuck Lever static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1472a246b010SChuck Lever {
1473ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1474ee0ac0c2SChuck Lever 	struct sock *sk = transport->inet;
1475a246b010SChuck Lever 
14767c6e066eSChuck Lever 	if (transport->rcvsize) {
1477a246b010SChuck Lever 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
14787c6e066eSChuck Lever 		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1479a246b010SChuck Lever 	}
14807c6e066eSChuck Lever 	if (transport->sndsize) {
1481a246b010SChuck Lever 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
14827c6e066eSChuck Lever 		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1483a246b010SChuck Lever 		sk->sk_write_space(sk);
1484a246b010SChuck Lever 	}
1485a246b010SChuck Lever }
1486a246b010SChuck Lever 
148743118c29SChuck Lever /**
1488470056c2SChuck Lever  * xs_udp_set_buffer_size - set send and receive limits
148943118c29SChuck Lever  * @xprt: generic transport
1490470056c2SChuck Lever  * @sndsize: requested size of send buffer, in bytes
1491470056c2SChuck Lever  * @rcvsize: requested size of receive buffer, in bytes
149243118c29SChuck Lever  *
1493470056c2SChuck Lever  * Set socket send and receive buffer size limits.
149443118c29SChuck Lever  */
1495470056c2SChuck Lever static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
149643118c29SChuck Lever {
14977c6e066eSChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
14987c6e066eSChuck Lever 
14997c6e066eSChuck Lever 	transport->sndsize = 0;
1500470056c2SChuck Lever 	if (sndsize)
15017c6e066eSChuck Lever 		transport->sndsize = sndsize + 1024;
15027c6e066eSChuck Lever 	transport->rcvsize = 0;
1503470056c2SChuck Lever 	if (rcvsize)
15047c6e066eSChuck Lever 		transport->rcvsize = rcvsize + 1024;
1505470056c2SChuck Lever 
1506470056c2SChuck Lever 	xs_udp_do_set_buffer_size(xprt);
150743118c29SChuck Lever }
150843118c29SChuck Lever 
150946c0ee8bSChuck Lever /**
151046c0ee8bSChuck Lever  * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
151146c0ee8bSChuck Lever  * @task: task that timed out
151246c0ee8bSChuck Lever  *
151346c0ee8bSChuck Lever  * Adjust the congestion window after a retransmit timeout has occurred.
151446c0ee8bSChuck Lever  */
151546c0ee8bSChuck Lever static void xs_udp_timer(struct rpc_task *task)
151646c0ee8bSChuck Lever {
151746c0ee8bSChuck Lever 	xprt_adjust_cwnd(task, -ETIMEDOUT);
151846c0ee8bSChuck Lever }
151946c0ee8bSChuck Lever 
1520b85d8806SChuck Lever static unsigned short xs_get_random_port(void)
1521b85d8806SChuck Lever {
1522b85d8806SChuck Lever 	unsigned short range = xprt_max_resvport - xprt_min_resvport;
1523b85d8806SChuck Lever 	unsigned short rand = (unsigned short) net_random() % range;
1524b85d8806SChuck Lever 	return rand + xprt_min_resvport;
1525b85d8806SChuck Lever }
1526b85d8806SChuck Lever 
152792200412SChuck Lever /**
152892200412SChuck Lever  * xs_set_port - reset the port number in the remote endpoint address
152992200412SChuck Lever  * @xprt: generic transport
153092200412SChuck Lever  * @port: new port number
153192200412SChuck Lever  *
153292200412SChuck Lever  */
153392200412SChuck Lever static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
153492200412SChuck Lever {
153592200412SChuck Lever 	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
1536c4efcb1dSChuck Lever 
15379dc3b095SChuck Lever 	rpc_set_port(xs_addr(xprt), port);
15389dc3b095SChuck Lever 	xs_update_peer_port(xprt);
153992200412SChuck Lever }
154092200412SChuck Lever 
154167a391d7STrond Myklebust static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
154267a391d7STrond Myklebust {
1543fbfffbd5SChuck Lever 	unsigned short port = transport->srcport;
154467a391d7STrond Myklebust 
154567a391d7STrond Myklebust 	if (port == 0 && transport->xprt.resvport)
154667a391d7STrond Myklebust 		port = xs_get_random_port();
154767a391d7STrond Myklebust 	return port;
154867a391d7STrond Myklebust }
154967a391d7STrond Myklebust 
155067a391d7STrond Myklebust static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
155167a391d7STrond Myklebust {
1552fbfffbd5SChuck Lever 	if (transport->srcport != 0)
1553fbfffbd5SChuck Lever 		transport->srcport = 0;
155467a391d7STrond Myklebust 	if (!transport->xprt.resvport)
155567a391d7STrond Myklebust 		return 0;
155667a391d7STrond Myklebust 	if (port <= xprt_min_resvport || port > xprt_max_resvport)
155767a391d7STrond Myklebust 		return xprt_max_resvport;
155867a391d7STrond Myklebust 	return --port;
155967a391d7STrond Myklebust }
156067a391d7STrond Myklebust 
15617dc753f0SChuck Lever static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
1562a246b010SChuck Lever {
1563a246b010SChuck Lever 	struct sockaddr_in myaddr = {
1564a246b010SChuck Lever 		.sin_family = AF_INET,
1565a246b010SChuck Lever 	};
1566d3bc9a1dSFrank van Maarseveen 	struct sockaddr_in *sa;
156767a391d7STrond Myklebust 	int err, nloop = 0;
156867a391d7STrond Myklebust 	unsigned short port = xs_get_srcport(transport, sock);
156967a391d7STrond Myklebust 	unsigned short last;
1570a246b010SChuck Lever 
1571fbfffbd5SChuck Lever 	sa = (struct sockaddr_in *)&transport->srcaddr;
1572d3bc9a1dSFrank van Maarseveen 	myaddr.sin_addr = sa->sin_addr;
1573a246b010SChuck Lever 	do {
1574a246b010SChuck Lever 		myaddr.sin_port = htons(port);
1575e6242e92SSridhar Samudrala 		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
1576a246b010SChuck Lever 						sizeof(myaddr));
157767a391d7STrond Myklebust 		if (port == 0)
1578d3bc9a1dSFrank van Maarseveen 			break;
1579a246b010SChuck Lever 		if (err == 0) {
1580fbfffbd5SChuck Lever 			transport->srcport = port;
1581d3bc9a1dSFrank van Maarseveen 			break;
1582a246b010SChuck Lever 		}
158367a391d7STrond Myklebust 		last = port;
158467a391d7STrond Myklebust 		port = xs_next_srcport(transport, sock, port);
158567a391d7STrond Myklebust 		if (port > last)
158667a391d7STrond Myklebust 			nloop++;
158767a391d7STrond Myklebust 	} while (err == -EADDRINUSE && nloop != 2);
158821454aaaSHarvey Harrison 	dprintk("RPC:       %s %pI4:%u: %s (%d)\n",
158921454aaaSHarvey Harrison 			__func__, &myaddr.sin_addr,
15907dc753f0SChuck Lever 			port, err ? "failed" : "ok", err);
1591a246b010SChuck Lever 	return err;
1592a246b010SChuck Lever }
1593a246b010SChuck Lever 
159490058d37SChuck Lever static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
159590058d37SChuck Lever {
159690058d37SChuck Lever 	struct sockaddr_in6 myaddr = {
159790058d37SChuck Lever 		.sin6_family = AF_INET6,
159890058d37SChuck Lever 	};
159990058d37SChuck Lever 	struct sockaddr_in6 *sa;
160067a391d7STrond Myklebust 	int err, nloop = 0;
160167a391d7STrond Myklebust 	unsigned short port = xs_get_srcport(transport, sock);
160267a391d7STrond Myklebust 	unsigned short last;
160390058d37SChuck Lever 
1604fbfffbd5SChuck Lever 	sa = (struct sockaddr_in6 *)&transport->srcaddr;
160590058d37SChuck Lever 	myaddr.sin6_addr = sa->sin6_addr;
160690058d37SChuck Lever 	do {
160790058d37SChuck Lever 		myaddr.sin6_port = htons(port);
160890058d37SChuck Lever 		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
160990058d37SChuck Lever 						sizeof(myaddr));
161067a391d7STrond Myklebust 		if (port == 0)
161190058d37SChuck Lever 			break;
161290058d37SChuck Lever 		if (err == 0) {
1613fbfffbd5SChuck Lever 			transport->srcport = port;
161490058d37SChuck Lever 			break;
161590058d37SChuck Lever 		}
161667a391d7STrond Myklebust 		last = port;
161767a391d7STrond Myklebust 		port = xs_next_srcport(transport, sock, port);
161867a391d7STrond Myklebust 		if (port > last)
161967a391d7STrond Myklebust 			nloop++;
162067a391d7STrond Myklebust 	} while (err == -EADDRINUSE && nloop != 2);
16215b095d98SHarvey Harrison 	dprintk("RPC:       xs_bind6 %pI6:%u: %s (%d)\n",
1622fdb46ee7SHarvey Harrison 		&myaddr.sin6_addr, port, err ? "failed" : "ok", err);
1623a246b010SChuck Lever 	return err;
1624a246b010SChuck Lever }
1625a246b010SChuck Lever 
1626ed07536eSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC
1627ed07536eSPeter Zijlstra static struct lock_class_key xs_key[2];
1628ed07536eSPeter Zijlstra static struct lock_class_key xs_slock_key[2];
1629ed07536eSPeter Zijlstra 
16308945ee5eSChuck Lever static inline void xs_reclassify_socket4(struct socket *sock)
1631ed07536eSPeter Zijlstra {
1632ed07536eSPeter Zijlstra 	struct sock *sk = sock->sk;
16338945ee5eSChuck Lever 
163402b3d346SJohn Heffner 	BUG_ON(sock_owned_by_user(sk));
16358945ee5eSChuck Lever 	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
16368945ee5eSChuck Lever 		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1637ed07536eSPeter Zijlstra }
16388945ee5eSChuck Lever 
16398945ee5eSChuck Lever static inline void xs_reclassify_socket6(struct socket *sock)
16408945ee5eSChuck Lever {
16418945ee5eSChuck Lever 	struct sock *sk = sock->sk;
16428945ee5eSChuck Lever 
1643f4921affSLinus Torvalds 	BUG_ON(sock_owned_by_user(sk));
16448945ee5eSChuck Lever 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
16458945ee5eSChuck Lever 		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1646ed07536eSPeter Zijlstra }
1647ed07536eSPeter Zijlstra #else
16488945ee5eSChuck Lever static inline void xs_reclassify_socket4(struct socket *sock)
16498945ee5eSChuck Lever {
16508945ee5eSChuck Lever }
16518945ee5eSChuck Lever 
16528945ee5eSChuck Lever static inline void xs_reclassify_socket6(struct socket *sock)
1653ed07536eSPeter Zijlstra {
1654ed07536eSPeter Zijlstra }
1655ed07536eSPeter Zijlstra #endif
1656ed07536eSPeter Zijlstra 
165716be2d20SChuck Lever static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1658a246b010SChuck Lever {
165916be2d20SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1660edb267a6SChuck Lever 
1661ee0ac0c2SChuck Lever 	if (!transport->inet) {
1662b0d93ad5SChuck Lever 		struct sock *sk = sock->sk;
1663b0d93ad5SChuck Lever 
1664b0d93ad5SChuck Lever 		write_lock_bh(&sk->sk_callback_lock);
1665b0d93ad5SChuck Lever 
16662a9e1cfaSTrond Myklebust 		xs_save_old_callbacks(transport, sk);
16672a9e1cfaSTrond Myklebust 
1668b0d93ad5SChuck Lever 		sk->sk_user_data = xprt;
1669b0d93ad5SChuck Lever 		sk->sk_data_ready = xs_udp_data_ready;
1670b0d93ad5SChuck Lever 		sk->sk_write_space = xs_udp_write_space;
1671482f32e6STrond Myklebust 		sk->sk_error_report = xs_error_report;
1672b0d93ad5SChuck Lever 		sk->sk_no_check = UDP_CSUM_NORCV;
1673b079fa7bSTrond Myklebust 		sk->sk_allocation = GFP_ATOMIC;
1674b0d93ad5SChuck Lever 
1675b0d93ad5SChuck Lever 		xprt_set_connected(xprt);
1676b0d93ad5SChuck Lever 
1677b0d93ad5SChuck Lever 		/* Reset to new socket */
1678ee0ac0c2SChuck Lever 		transport->sock = sock;
1679ee0ac0c2SChuck Lever 		transport->inet = sk;
1680b0d93ad5SChuck Lever 
1681b0d93ad5SChuck Lever 		write_unlock_bh(&sk->sk_callback_lock);
1682b0d93ad5SChuck Lever 	}
1683470056c2SChuck Lever 	xs_udp_do_set_buffer_size(xprt);
168416be2d20SChuck Lever }
168516be2d20SChuck Lever 
1686a246b010SChuck Lever /**
16879c3d72deSChuck Lever  * xs_udp_connect_worker4 - set up a UDP socket
1688a246b010SChuck Lever  * @work: RPC transport to connect
1689a246b010SChuck Lever  *
1690a246b010SChuck Lever  * Invoked by a work queue tasklet.
1691a246b010SChuck Lever  */
16929c3d72deSChuck Lever static void xs_udp_connect_worker4(struct work_struct *work)
1693a246b010SChuck Lever {
1694a246b010SChuck Lever 	struct sock_xprt *transport =
1695a246b010SChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
1696a246b010SChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
1697a246b010SChuck Lever 	struct socket *sock = transport->sock;
1698a246b010SChuck Lever 	int err, status = -EIO;
1699a246b010SChuck Lever 
170001d37c42STrond Myklebust 	if (xprt->shutdown)
17019903cd1cSChuck Lever 		goto out;
17029903cd1cSChuck Lever 
1703a246b010SChuck Lever 	/* Start by resetting any existing state */
1704fe315e76SChuck Lever 	xs_reset_transport(transport);
1705a246b010SChuck Lever 
1706fe315e76SChuck Lever 	err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1707fe315e76SChuck Lever 	if (err < 0) {
17089903cd1cSChuck Lever 		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
1709a246b010SChuck Lever 		goto out;
1710a246b010SChuck Lever 	}
17118945ee5eSChuck Lever 	xs_reclassify_socket4(sock);
1712a246b010SChuck Lever 
17137dc753f0SChuck Lever 	if (xs_bind4(transport, sock)) {
17149903cd1cSChuck Lever 		sock_release(sock);
17159903cd1cSChuck Lever 		goto out;
1716a246b010SChuck Lever 	}
1717b0d93ad5SChuck Lever 
1718c740eff8SChuck Lever 	dprintk("RPC:       worker connecting xprt %p via %s to "
1719c740eff8SChuck Lever 				"%s (port %s)\n", xprt,
1720c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PROTO],
1721c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_ADDR],
1722c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PORT]);
1723b0d93ad5SChuck Lever 
172416be2d20SChuck Lever 	xs_udp_finish_connecting(xprt, sock);
1725a246b010SChuck Lever 	status = 0;
1726b0d93ad5SChuck Lever out:
1727b0d93ad5SChuck Lever 	xprt_clear_connecting(xprt);
17287d1e8255STrond Myklebust 	xprt_wake_pending_tasks(xprt, status);
1729b0d93ad5SChuck Lever }
1730b0d93ad5SChuck Lever 
173168e220bdSChuck Lever /**
173268e220bdSChuck Lever  * xs_udp_connect_worker6 - set up a UDP socket
173368e220bdSChuck Lever  * @work: RPC transport to connect
173468e220bdSChuck Lever  *
173568e220bdSChuck Lever  * Invoked by a work queue tasklet.
173668e220bdSChuck Lever  */
173768e220bdSChuck Lever static void xs_udp_connect_worker6(struct work_struct *work)
173868e220bdSChuck Lever {
173968e220bdSChuck Lever 	struct sock_xprt *transport =
174068e220bdSChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
174168e220bdSChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
174268e220bdSChuck Lever 	struct socket *sock = transport->sock;
174368e220bdSChuck Lever 	int err, status = -EIO;
174468e220bdSChuck Lever 
174501d37c42STrond Myklebust 	if (xprt->shutdown)
174668e220bdSChuck Lever 		goto out;
174768e220bdSChuck Lever 
174868e220bdSChuck Lever 	/* Start by resetting any existing state */
1749fe315e76SChuck Lever 	xs_reset_transport(transport);
175068e220bdSChuck Lever 
1751fe315e76SChuck Lever 	err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
1752fe315e76SChuck Lever 	if (err < 0) {
175368e220bdSChuck Lever 		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
175468e220bdSChuck Lever 		goto out;
175568e220bdSChuck Lever 	}
17568945ee5eSChuck Lever 	xs_reclassify_socket6(sock);
175768e220bdSChuck Lever 
175868e220bdSChuck Lever 	if (xs_bind6(transport, sock) < 0) {
175968e220bdSChuck Lever 		sock_release(sock);
176068e220bdSChuck Lever 		goto out;
176168e220bdSChuck Lever 	}
176268e220bdSChuck Lever 
1763c740eff8SChuck Lever 	dprintk("RPC:       worker connecting xprt %p via %s to "
1764c740eff8SChuck Lever 				"%s (port %s)\n", xprt,
1765c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PROTO],
1766c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_ADDR],
1767c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PORT]);
176868e220bdSChuck Lever 
176968e220bdSChuck Lever 	xs_udp_finish_connecting(xprt, sock);
1770a246b010SChuck Lever 	status = 0;
1771b0d93ad5SChuck Lever out:
1772b0d93ad5SChuck Lever 	xprt_clear_connecting(xprt);
17737d1e8255STrond Myklebust 	xprt_wake_pending_tasks(xprt, status);
1774b0d93ad5SChuck Lever }
1775b0d93ad5SChuck Lever 
17763167e12cSChuck Lever /*
17773167e12cSChuck Lever  * We need to preserve the port number so the reply cache on the server can
17783167e12cSChuck Lever  * find our cached RPC replies when we get around to reconnecting.
17793167e12cSChuck Lever  */
178040d2549dSTrond Myklebust static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
17813167e12cSChuck Lever {
17823167e12cSChuck Lever 	int result;
17833167e12cSChuck Lever 	struct sockaddr any;
17843167e12cSChuck Lever 
17853167e12cSChuck Lever 	dprintk("RPC:       disconnecting xprt %p to reuse port\n", xprt);
17863167e12cSChuck Lever 
17873167e12cSChuck Lever 	/*
17883167e12cSChuck Lever 	 * Disconnect the transport socket by doing a connect operation
17893167e12cSChuck Lever 	 * with AF_UNSPEC.  This should return immediately...
17903167e12cSChuck Lever 	 */
17913167e12cSChuck Lever 	memset(&any, 0, sizeof(any));
17923167e12cSChuck Lever 	any.sa_family = AF_UNSPEC;
1793ee0ac0c2SChuck Lever 	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
17947d1e8255STrond Myklebust 	if (!result)
17957d1e8255STrond Myklebust 		xs_sock_mark_closed(xprt);
17967d1e8255STrond Myklebust 	else
17973167e12cSChuck Lever 		dprintk("RPC:       AF_UNSPEC connect return code %d\n",
17983167e12cSChuck Lever 				result);
17993167e12cSChuck Lever }
18003167e12cSChuck Lever 
180140d2549dSTrond Myklebust static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
180240d2549dSTrond Myklebust {
180340d2549dSTrond Myklebust 	unsigned int state = transport->inet->sk_state;
180440d2549dSTrond Myklebust 
180540d2549dSTrond Myklebust 	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
180640d2549dSTrond Myklebust 		return;
180740d2549dSTrond Myklebust 	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
180840d2549dSTrond Myklebust 		return;
180940d2549dSTrond Myklebust 	xs_abort_connection(xprt, transport);
181040d2549dSTrond Myklebust }
181140d2549dSTrond Myklebust 
181216be2d20SChuck Lever static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1813b0d93ad5SChuck Lever {
181416be2d20SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1815edb267a6SChuck Lever 
1816ee0ac0c2SChuck Lever 	if (!transport->inet) {
1817b0d93ad5SChuck Lever 		struct sock *sk = sock->sk;
1818b0d93ad5SChuck Lever 
1819b0d93ad5SChuck Lever 		write_lock_bh(&sk->sk_callback_lock);
1820b0d93ad5SChuck Lever 
18212a9e1cfaSTrond Myklebust 		xs_save_old_callbacks(transport, sk);
18222a9e1cfaSTrond Myklebust 
1823b0d93ad5SChuck Lever 		sk->sk_user_data = xprt;
1824b0d93ad5SChuck Lever 		sk->sk_data_ready = xs_tcp_data_ready;
1825b0d93ad5SChuck Lever 		sk->sk_state_change = xs_tcp_state_change;
1826b0d93ad5SChuck Lever 		sk->sk_write_space = xs_tcp_write_space;
1827482f32e6STrond Myklebust 		sk->sk_error_report = xs_error_report;
1828b079fa7bSTrond Myklebust 		sk->sk_allocation = GFP_ATOMIC;
18293167e12cSChuck Lever 
18303167e12cSChuck Lever 		/* socket options */
18313167e12cSChuck Lever 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
18323167e12cSChuck Lever 		sock_reset_flag(sk, SOCK_LINGER);
18333167e12cSChuck Lever 		tcp_sk(sk)->linger2 = 0;
18343167e12cSChuck Lever 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
1835b0d93ad5SChuck Lever 
1836b0d93ad5SChuck Lever 		xprt_clear_connected(xprt);
1837b0d93ad5SChuck Lever 
1838b0d93ad5SChuck Lever 		/* Reset to new socket */
1839ee0ac0c2SChuck Lever 		transport->sock = sock;
1840ee0ac0c2SChuck Lever 		transport->inet = sk;
1841b0d93ad5SChuck Lever 
1842b0d93ad5SChuck Lever 		write_unlock_bh(&sk->sk_callback_lock);
1843b0d93ad5SChuck Lever 	}
1844b0d93ad5SChuck Lever 
184501d37c42STrond Myklebust 	if (!xprt_bound(xprt))
184601d37c42STrond Myklebust 		return -ENOTCONN;
184701d37c42STrond Myklebust 
1848b0d93ad5SChuck Lever 	/* Tell the socket layer to start connecting... */
1849262ca07dSChuck Lever 	xprt->stat.connect_count++;
1850262ca07dSChuck Lever 	xprt->stat.connect_start = jiffies;
185195392c59SChuck Lever 	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
185216be2d20SChuck Lever }
185316be2d20SChuck Lever 
185416be2d20SChuck Lever /**
1855b61d59ffSTrond Myklebust  * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
1856b61d59ffSTrond Myklebust  * @xprt: RPC transport to connect
1857b61d59ffSTrond Myklebust  * @transport: socket transport to connect
1858b61d59ffSTrond Myklebust  * @create_sock: function to create a socket of the correct type
185916be2d20SChuck Lever  *
186016be2d20SChuck Lever  * Invoked by a work queue tasklet.
186116be2d20SChuck Lever  */
1862b61d59ffSTrond Myklebust static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
1863b61d59ffSTrond Myklebust 		struct sock_xprt *transport,
1864b61d59ffSTrond Myklebust 		struct socket *(*create_sock)(struct rpc_xprt *,
1865b61d59ffSTrond Myklebust 			struct sock_xprt *))
186616be2d20SChuck Lever {
186716be2d20SChuck Lever 	struct socket *sock = transport->sock;
1868b61d59ffSTrond Myklebust 	int status = -EIO;
186916be2d20SChuck Lever 
187001d37c42STrond Myklebust 	if (xprt->shutdown)
187116be2d20SChuck Lever 		goto out;
187216be2d20SChuck Lever 
187316be2d20SChuck Lever 	if (!sock) {
18747d1e8255STrond Myklebust 		clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1875b61d59ffSTrond Myklebust 		sock = create_sock(xprt, transport);
1876b61d59ffSTrond Myklebust 		if (IS_ERR(sock)) {
1877b61d59ffSTrond Myklebust 			status = PTR_ERR(sock);
187816be2d20SChuck Lever 			goto out;
187916be2d20SChuck Lever 		}
18807d1e8255STrond Myklebust 	} else {
18817d1e8255STrond Myklebust 		int abort_and_exit;
18827d1e8255STrond Myklebust 
18837d1e8255STrond Myklebust 		abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
18847d1e8255STrond Myklebust 				&xprt->state);
188516be2d20SChuck Lever 		/* "close" the socket, preserving the local port */
188640d2549dSTrond Myklebust 		xs_tcp_reuse_connection(xprt, transport);
188716be2d20SChuck Lever 
18887d1e8255STrond Myklebust 		if (abort_and_exit)
18897d1e8255STrond Myklebust 			goto out_eagain;
18907d1e8255STrond Myklebust 	}
18917d1e8255STrond Myklebust 
1892c740eff8SChuck Lever 	dprintk("RPC:       worker connecting xprt %p via %s to "
1893c740eff8SChuck Lever 				"%s (port %s)\n", xprt,
1894c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PROTO],
1895c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_ADDR],
1896c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PORT]);
189716be2d20SChuck Lever 
189816be2d20SChuck Lever 	status = xs_tcp_finish_connecting(xprt, sock);
1899a246b010SChuck Lever 	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
190046121cf7SChuck Lever 			xprt, -status, xprt_connected(xprt),
190146121cf7SChuck Lever 			sock->sk->sk_state);
1902a246b010SChuck Lever 	switch (status) {
1903f75e6745STrond Myklebust 	default:
1904f75e6745STrond Myklebust 		printk("%s: connect returned unhandled error %d\n",
1905f75e6745STrond Myklebust 			__func__, status);
1906f75e6745STrond Myklebust 	case -EADDRNOTAVAIL:
1907f75e6745STrond Myklebust 		/* We're probably in TIME_WAIT. Get rid of existing socket,
1908f75e6745STrond Myklebust 		 * and retry
1909f75e6745STrond Myklebust 		 */
1910f75e6745STrond Myklebust 		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1911f75e6745STrond Myklebust 		xprt_force_disconnect(xprt);
191288b5ed73STrond Myklebust 		break;
19138a2cec29STrond Myklebust 	case -ECONNREFUSED:
19148a2cec29STrond Myklebust 	case -ECONNRESET:
19158a2cec29STrond Myklebust 	case -ENETUNREACH:
19168a2cec29STrond Myklebust 		/* retry with existing socket, after a delay */
19172a491991STrond Myklebust 	case 0:
1918a246b010SChuck Lever 	case -EINPROGRESS:
1919a246b010SChuck Lever 	case -EALREADY:
19207d1e8255STrond Myklebust 		xprt_clear_connecting(xprt);
19217d1e8255STrond Myklebust 		return;
19228a2cec29STrond Myklebust 	}
19237d1e8255STrond Myklebust out_eagain:
19242a491991STrond Myklebust 	status = -EAGAIN;
1925a246b010SChuck Lever out:
19262226feb6SChuck Lever 	xprt_clear_connecting(xprt);
19277d1e8255STrond Myklebust 	xprt_wake_pending_tasks(xprt, status);
1928a246b010SChuck Lever }
1929a246b010SChuck Lever 
1930b61d59ffSTrond Myklebust static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
1931b61d59ffSTrond Myklebust 		struct sock_xprt *transport)
1932b61d59ffSTrond Myklebust {
1933b61d59ffSTrond Myklebust 	struct socket *sock;
1934b61d59ffSTrond Myklebust 	int err;
1935b61d59ffSTrond Myklebust 
1936b61d59ffSTrond Myklebust 	/* start from scratch */
1937b61d59ffSTrond Myklebust 	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1938b61d59ffSTrond Myklebust 	if (err < 0) {
1939b61d59ffSTrond Myklebust 		dprintk("RPC:       can't create TCP transport socket (%d).\n",
1940b61d59ffSTrond Myklebust 				-err);
1941b61d59ffSTrond Myklebust 		goto out_err;
1942b61d59ffSTrond Myklebust 	}
1943b61d59ffSTrond Myklebust 	xs_reclassify_socket4(sock);
1944b61d59ffSTrond Myklebust 
1945b61d59ffSTrond Myklebust 	if (xs_bind4(transport, sock) < 0) {
1946b61d59ffSTrond Myklebust 		sock_release(sock);
1947b61d59ffSTrond Myklebust 		goto out_err;
1948b61d59ffSTrond Myklebust 	}
1949b61d59ffSTrond Myklebust 	return sock;
1950b61d59ffSTrond Myklebust out_err:
1951b61d59ffSTrond Myklebust 	return ERR_PTR(-EIO);
1952b61d59ffSTrond Myklebust }
1953b61d59ffSTrond Myklebust 
1954b61d59ffSTrond Myklebust /**
1955a246b010SChuck Lever  * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
1956a246b010SChuck Lever  * @work: RPC transport to connect
1957a246b010SChuck Lever  *
1958a246b010SChuck Lever  * Invoked by a work queue tasklet.
1959a246b010SChuck Lever  */
1960a246b010SChuck Lever static void xs_tcp_connect_worker4(struct work_struct *work)
1961a246b010SChuck Lever {
1962a246b010SChuck Lever 	struct sock_xprt *transport =
1963a246b010SChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
1964a246b010SChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
1965a246b010SChuck Lever 
1966b61d59ffSTrond Myklebust 	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
1967b61d59ffSTrond Myklebust }
1968a246b010SChuck Lever 
1969b61d59ffSTrond Myklebust static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
1970b61d59ffSTrond Myklebust 		struct sock_xprt *transport)
1971b61d59ffSTrond Myklebust {
1972b61d59ffSTrond Myklebust 	struct socket *sock;
1973b61d59ffSTrond Myklebust 	int err;
1974b61d59ffSTrond Myklebust 
1975a246b010SChuck Lever 	/* start from scratch */
1976b61d59ffSTrond Myklebust 	err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
1977b61d59ffSTrond Myklebust 	if (err < 0) {
1978b61d59ffSTrond Myklebust 		dprintk("RPC:       can't create TCP transport socket (%d).\n",
1979b61d59ffSTrond Myklebust 				-err);
1980b61d59ffSTrond Myklebust 		goto out_err;
19819903cd1cSChuck Lever 	}
1982b61d59ffSTrond Myklebust 	xs_reclassify_socket6(sock);
19839903cd1cSChuck Lever 
1984b61d59ffSTrond Myklebust 	if (xs_bind6(transport, sock) < 0) {
1985a246b010SChuck Lever 		sock_release(sock);
1986b61d59ffSTrond Myklebust 		goto out_err;
1987a246b010SChuck Lever 	}
1988b61d59ffSTrond Myklebust 	return sock;
1989b61d59ffSTrond Myklebust out_err:
1990b61d59ffSTrond Myklebust 	return ERR_PTR(-EIO);
1991a246b010SChuck Lever }
1992a246b010SChuck Lever 
1993a246b010SChuck Lever /**
199468e220bdSChuck Lever  * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
199568e220bdSChuck Lever  * @work: RPC transport to connect
199668e220bdSChuck Lever  *
199768e220bdSChuck Lever  * Invoked by a work queue tasklet.
199868e220bdSChuck Lever  */
199968e220bdSChuck Lever static void xs_tcp_connect_worker6(struct work_struct *work)
200068e220bdSChuck Lever {
200168e220bdSChuck Lever 	struct sock_xprt *transport =
200268e220bdSChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
200368e220bdSChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
200468e220bdSChuck Lever 
2005b61d59ffSTrond Myklebust 	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
200668e220bdSChuck Lever }
200768e220bdSChuck Lever 
200868e220bdSChuck Lever /**
2009a246b010SChuck Lever  * xs_connect - connect a socket to a remote endpoint
2010a246b010SChuck Lever  * @task: address of RPC task that manages state of connect request
2011a246b010SChuck Lever  *
2012a246b010SChuck Lever  * TCP: If the remote end dropped the connection, delay reconnecting.
201303bf4b70SChuck Lever  *
201403bf4b70SChuck Lever  * UDP socket connects are synchronous, but we use a work queue anyway
201503bf4b70SChuck Lever  * to guarantee that even unprivileged user processes can set up a
201603bf4b70SChuck Lever  * socket on a privileged port.
201703bf4b70SChuck Lever  *
201803bf4b70SChuck Lever  * If a UDP socket connect fails, the delay behavior here prevents
201903bf4b70SChuck Lever  * retry floods (hard mounts).
2020a246b010SChuck Lever  */
2021a246b010SChuck Lever static void xs_connect(struct rpc_task *task)
2022a246b010SChuck Lever {
2023a246b010SChuck Lever 	struct rpc_xprt *xprt = task->tk_xprt;
2024ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2025a246b010SChuck Lever 
2026b0d93ad5SChuck Lever 	if (xprt_test_and_set_connecting(xprt))
2027b0d93ad5SChuck Lever 		return;
2028b0d93ad5SChuck Lever 
2029ee0ac0c2SChuck Lever 	if (transport->sock != NULL) {
203046121cf7SChuck Lever 		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
203146121cf7SChuck Lever 				"seconds\n",
203203bf4b70SChuck Lever 				xprt, xprt->reestablish_timeout / HZ);
2033c1384c9cSTrond Myklebust 		queue_delayed_work(rpciod_workqueue,
2034c1384c9cSTrond Myklebust 				   &transport->connect_worker,
203503bf4b70SChuck Lever 				   xprt->reestablish_timeout);
203603bf4b70SChuck Lever 		xprt->reestablish_timeout <<= 1;
203703bf4b70SChuck Lever 		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
203803bf4b70SChuck Lever 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
20399903cd1cSChuck Lever 	} else {
20409903cd1cSChuck Lever 		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
2041c1384c9cSTrond Myklebust 		queue_delayed_work(rpciod_workqueue,
2042c1384c9cSTrond Myklebust 				   &transport->connect_worker, 0);
2043a246b010SChuck Lever 	}
2044a246b010SChuck Lever }
2045a246b010SChuck Lever 
2046e06799f9STrond Myklebust static void xs_tcp_connect(struct rpc_task *task)
2047e06799f9STrond Myklebust {
2048e06799f9STrond Myklebust 	struct rpc_xprt *xprt = task->tk_xprt;
2049e06799f9STrond Myklebust 
2050e06799f9STrond Myklebust 	/* Exit if we need to wait for socket shutdown to complete */
2051e06799f9STrond Myklebust 	if (test_bit(XPRT_CLOSING, &xprt->state))
2052e06799f9STrond Myklebust 		return;
2053e06799f9STrond Myklebust 	xs_connect(task);
2054e06799f9STrond Myklebust }
2055e06799f9STrond Myklebust 
2056262ca07dSChuck Lever /**
2057262ca07dSChuck Lever  * xs_udp_print_stats - display UDP socket-specific stats
2058262ca07dSChuck Lever  * @xprt: rpc_xprt struct containing statistics
2059262ca07dSChuck Lever  * @seq: output file
2060262ca07dSChuck Lever  *
2061262ca07dSChuck Lever  */
2062262ca07dSChuck Lever static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2063262ca07dSChuck Lever {
2064c8475461SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2065c8475461SChuck Lever 
2066262ca07dSChuck Lever 	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
2067fbfffbd5SChuck Lever 			transport->srcport,
2068262ca07dSChuck Lever 			xprt->stat.bind_count,
2069262ca07dSChuck Lever 			xprt->stat.sends,
2070262ca07dSChuck Lever 			xprt->stat.recvs,
2071262ca07dSChuck Lever 			xprt->stat.bad_xids,
2072262ca07dSChuck Lever 			xprt->stat.req_u,
2073262ca07dSChuck Lever 			xprt->stat.bklog_u);
2074262ca07dSChuck Lever }
2075262ca07dSChuck Lever 
2076262ca07dSChuck Lever /**
2077262ca07dSChuck Lever  * xs_tcp_print_stats - display TCP socket-specific stats
2078262ca07dSChuck Lever  * @xprt: rpc_xprt struct containing statistics
2079262ca07dSChuck Lever  * @seq: output file
2080262ca07dSChuck Lever  *
2081262ca07dSChuck Lever  */
2082262ca07dSChuck Lever static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2083262ca07dSChuck Lever {
2084c8475461SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2085262ca07dSChuck Lever 	long idle_time = 0;
2086262ca07dSChuck Lever 
2087262ca07dSChuck Lever 	if (xprt_connected(xprt))
2088262ca07dSChuck Lever 		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2089262ca07dSChuck Lever 
2090262ca07dSChuck Lever 	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
2091fbfffbd5SChuck Lever 			transport->srcport,
2092262ca07dSChuck Lever 			xprt->stat.bind_count,
2093262ca07dSChuck Lever 			xprt->stat.connect_count,
2094262ca07dSChuck Lever 			xprt->stat.connect_time,
2095262ca07dSChuck Lever 			idle_time,
2096262ca07dSChuck Lever 			xprt->stat.sends,
2097262ca07dSChuck Lever 			xprt->stat.recvs,
2098262ca07dSChuck Lever 			xprt->stat.bad_xids,
2099262ca07dSChuck Lever 			xprt->stat.req_u,
2100262ca07dSChuck Lever 			xprt->stat.bklog_u);
2101262ca07dSChuck Lever }
2102262ca07dSChuck Lever 
2103*4cfc7e60SRahul Iyer /*
2104*4cfc7e60SRahul Iyer  * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2105*4cfc7e60SRahul Iyer  * we allocate pages instead of doing a kmalloc like rpc_malloc is because we want
2106*4cfc7e60SRahul Iyer  * to use the server side send routines.
2107*4cfc7e60SRahul Iyer  */
2108*4cfc7e60SRahul Iyer void *bc_malloc(struct rpc_task *task, size_t size)
2109*4cfc7e60SRahul Iyer {
2110*4cfc7e60SRahul Iyer 	struct page *page;
2111*4cfc7e60SRahul Iyer 	struct rpc_buffer *buf;
2112*4cfc7e60SRahul Iyer 
2113*4cfc7e60SRahul Iyer 	BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2114*4cfc7e60SRahul Iyer 	page = alloc_page(GFP_KERNEL);
2115*4cfc7e60SRahul Iyer 
2116*4cfc7e60SRahul Iyer 	if (!page)
2117*4cfc7e60SRahul Iyer 		return NULL;
2118*4cfc7e60SRahul Iyer 
2119*4cfc7e60SRahul Iyer 	buf = page_address(page);
2120*4cfc7e60SRahul Iyer 	buf->len = PAGE_SIZE;
2121*4cfc7e60SRahul Iyer 
2122*4cfc7e60SRahul Iyer 	return buf->data;
2123*4cfc7e60SRahul Iyer }
2124*4cfc7e60SRahul Iyer 
2125*4cfc7e60SRahul Iyer /*
2126*4cfc7e60SRahul Iyer  * Free the space allocated in the bc_malloc routine
2127*4cfc7e60SRahul Iyer  */
2128*4cfc7e60SRahul Iyer void bc_free(void *buffer)
2129*4cfc7e60SRahul Iyer {
2130*4cfc7e60SRahul Iyer 	struct rpc_buffer *buf;
2131*4cfc7e60SRahul Iyer 
2132*4cfc7e60SRahul Iyer 	if (!buffer)
2133*4cfc7e60SRahul Iyer 		return;
2134*4cfc7e60SRahul Iyer 
2135*4cfc7e60SRahul Iyer 	buf = container_of(buffer, struct rpc_buffer, data);
2136*4cfc7e60SRahul Iyer 	free_page((unsigned long)buf);
2137*4cfc7e60SRahul Iyer }
2138*4cfc7e60SRahul Iyer 
2139*4cfc7e60SRahul Iyer /*
2140*4cfc7e60SRahul Iyer  * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
2141*4cfc7e60SRahul Iyer  * Use the svc_sock to send the callback. Must be called with the xprt's
2142*4cfc7e60SRahul Iyer  * xpt_mutex held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2143*4cfc7e60SRahul Iyer static int bc_sendto(struct rpc_rqst *req)
2144*4cfc7e60SRahul Iyer {
2145*4cfc7e60SRahul Iyer 	int len;
2146*4cfc7e60SRahul Iyer 	struct xdr_buf *xbufp = &req->rq_snd_buf;
2147*4cfc7e60SRahul Iyer 	struct rpc_xprt *xprt = req->rq_xprt;
2148*4cfc7e60SRahul Iyer 	struct sock_xprt *transport =
2149*4cfc7e60SRahul Iyer 				container_of(xprt, struct sock_xprt, xprt);
2150*4cfc7e60SRahul Iyer 	struct socket *sock = transport->sock;
2151*4cfc7e60SRahul Iyer 	unsigned long headoff;
2152*4cfc7e60SRahul Iyer 	unsigned long tailoff;
2153*4cfc7e60SRahul Iyer 
2154*4cfc7e60SRahul Iyer 	/*
2155*4cfc7e60SRahul Iyer 	 * Set up the rpc header and record marker stuff
2156*4cfc7e60SRahul Iyer 	 */
2157*4cfc7e60SRahul Iyer 	xs_encode_tcp_record_marker(xbufp);
2158*4cfc7e60SRahul Iyer 
2159*4cfc7e60SRahul Iyer 	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2160*4cfc7e60SRahul Iyer 	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2161*4cfc7e60SRahul Iyer 	len = svc_send_common(sock, xbufp,
2162*4cfc7e60SRahul Iyer 			      virt_to_page(xbufp->head[0].iov_base), headoff,
2163*4cfc7e60SRahul Iyer 			      xbufp->tail[0].iov_base, tailoff);
2164*4cfc7e60SRahul Iyer 
2165*4cfc7e60SRahul Iyer 	if (len != xbufp->len) {
2166*4cfc7e60SRahul Iyer 		printk(KERN_NOTICE "Error sending entire callback!\n");
2167*4cfc7e60SRahul Iyer 		len = -EAGAIN;
2168*4cfc7e60SRahul Iyer 	}
2169*4cfc7e60SRahul Iyer 
2170*4cfc7e60SRahul Iyer 	return len;
2171*4cfc7e60SRahul Iyer }
2172*4cfc7e60SRahul Iyer 
2173*4cfc7e60SRahul Iyer /*
2174*4cfc7e60SRahul Iyer  * The send routine. Borrows from svc_send
2175*4cfc7e60SRahul Iyer  */
2176*4cfc7e60SRahul Iyer static int bc_send_request(struct rpc_task *task)
2177*4cfc7e60SRahul Iyer {
2178*4cfc7e60SRahul Iyer 	struct rpc_rqst *req = task->tk_rqstp;
2179*4cfc7e60SRahul Iyer 	struct svc_xprt	*xprt;
2180*4cfc7e60SRahul Iyer 	struct svc_sock         *svsk;
2181*4cfc7e60SRahul Iyer 	u32                     len;
2182*4cfc7e60SRahul Iyer 
2183*4cfc7e60SRahul Iyer 	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2184*4cfc7e60SRahul Iyer 	/*
2185*4cfc7e60SRahul Iyer 	 * Get the server socket associated with this callback xprt
2186*4cfc7e60SRahul Iyer 	 */
2187*4cfc7e60SRahul Iyer 	xprt = req->rq_xprt->bc_xprt;
2188*4cfc7e60SRahul Iyer 	svsk = container_of(xprt, struct svc_sock, sk_xprt);
2189*4cfc7e60SRahul Iyer 
2190*4cfc7e60SRahul Iyer 	/*
2191*4cfc7e60SRahul Iyer 	 * Grab the mutex to serialize sends, as the connection is shared
2192*4cfc7e60SRahul Iyer 	 * with the fore channel
2193*4cfc7e60SRahul Iyer 	 */
2194*4cfc7e60SRahul Iyer 	if (!mutex_trylock(&xprt->xpt_mutex)) {
2195*4cfc7e60SRahul Iyer 		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2196*4cfc7e60SRahul Iyer 		if (!mutex_trylock(&xprt->xpt_mutex))
2197*4cfc7e60SRahul Iyer 			return -EAGAIN;
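		/*
		 * Second try succeeded: the task was queued on xpt_bc_pending
		 * by rpc_sleep_on() above, so wake it back up here rather than
		 * leaving it asleep while we go ahead with the send.
		 */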
2198*4cfc7e60SRahul Iyer 		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2199*4cfc7e60SRahul Iyer 	}
2200*4cfc7e60SRahul Iyer 	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2201*4cfc7e60SRahul Iyer 		len = -ENOTCONN;
2202*4cfc7e60SRahul Iyer 	else
2203*4cfc7e60SRahul Iyer 		len = bc_sendto(req);
2204*4cfc7e60SRahul Iyer 	mutex_unlock(&xprt->xpt_mutex);
2205*4cfc7e60SRahul Iyer 
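	/*
	 * A positive return from bc_sendto() is the byte count actually sent;
	 * the RPC layer expects 0 on success, so normalize it here.
	 */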
2206*4cfc7e60SRahul Iyer 	if (len > 0)
2207*4cfc7e60SRahul Iyer 		len = 0;
2208*4cfc7e60SRahul Iyer 
2209*4cfc7e60SRahul Iyer 	return len;
2210*4cfc7e60SRahul Iyer }
2211*4cfc7e60SRahul Iyer 
2212*4cfc7e60SRahul Iyer /*
2213*4cfc7e60SRahul Iyer  * The close routine. Since the connection is client-initiated, we do nothing
2214*4cfc7e60SRahul Iyer  */
2215*4cfc7e60SRahul Iyer 
2216*4cfc7e60SRahul Iyer static void bc_close(struct rpc_xprt *xprt)
2217*4cfc7e60SRahul Iyer {
2218*4cfc7e60SRahul Iyer 	return;
2219*4cfc7e60SRahul Iyer }
2220*4cfc7e60SRahul Iyer 
2221*4cfc7e60SRahul Iyer /*
2222*4cfc7e60SRahul Iyer  * The xprt destroy routine. Again, because this connection is
2223*4cfc7e60SRahul Iyer  * client-initiated, we do nothing
2224*4cfc7e60SRahul Iyer  */
2225*4cfc7e60SRahul Iyer 
2226*4cfc7e60SRahul Iyer static void bc_destroy(struct rpc_xprt *xprt)
2227*4cfc7e60SRahul Iyer {
2228*4cfc7e60SRahul Iyer 	return;
2229*4cfc7e60SRahul Iyer }
2230*4cfc7e60SRahul Iyer 
2231262965f5SChuck Lever static struct rpc_xprt_ops xs_udp_ops = {
223243118c29SChuck Lever 	.set_buffer_size	= xs_udp_set_buffer_size,
223312a80469SChuck Lever 	.reserve_xprt		= xprt_reserve_xprt_cong,
223449e9a890SChuck Lever 	.release_xprt		= xprt_release_xprt_cong,
223545160d62SChuck Lever 	.rpcbind		= rpcb_getport_async,
223692200412SChuck Lever 	.set_port		= xs_set_port,
22379903cd1cSChuck Lever 	.connect		= xs_connect,
223802107148SChuck Lever 	.buf_alloc		= rpc_malloc,
223902107148SChuck Lever 	.buf_free		= rpc_free,
2240262965f5SChuck Lever 	.send_request		= xs_udp_send_request,
2241fe3aca29SChuck Lever 	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
224246c0ee8bSChuck Lever 	.timer			= xs_udp_timer,
2243a58dd398SChuck Lever 	.release_request	= xprt_release_rqst_cong,
2244262965f5SChuck Lever 	.close			= xs_close,
2245262965f5SChuck Lever 	.destroy		= xs_destroy,
2246262ca07dSChuck Lever 	.print_stats		= xs_udp_print_stats,
2247262965f5SChuck Lever };
2248262965f5SChuck Lever 
2249262965f5SChuck Lever static struct rpc_xprt_ops xs_tcp_ops = {
225012a80469SChuck Lever 	.reserve_xprt		= xprt_reserve_xprt,
2251e0ab53deSTrond Myklebust 	.release_xprt		= xs_tcp_release_xprt,
225245160d62SChuck Lever 	.rpcbind		= rpcb_getport_async,
225392200412SChuck Lever 	.set_port		= xs_set_port,
2254e06799f9STrond Myklebust 	.connect		= xs_tcp_connect,
225502107148SChuck Lever 	.buf_alloc		= rpc_malloc,
225602107148SChuck Lever 	.buf_free		= rpc_free,
2257262965f5SChuck Lever 	.send_request		= xs_tcp_send_request,
2258fe3aca29SChuck Lever 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
22590d90ba1cSRicardo Labiaga #if defined(CONFIG_NFS_V4_1)
22600d90ba1cSRicardo Labiaga 	.release_request	= bc_release_request,
22610d90ba1cSRicardo Labiaga #endif /* CONFIG_NFS_V4_1 */
2262f75e6745STrond Myklebust 	.close			= xs_tcp_close,
22639903cd1cSChuck Lever 	.destroy		= xs_destroy,
2264262ca07dSChuck Lever 	.print_stats		= xs_tcp_print_stats,
2265a246b010SChuck Lever };
2266a246b010SChuck Lever 
2267*4cfc7e60SRahul Iyer /*
2268*4cfc7e60SRahul Iyer  * The rpc_xprt_ops for the server backchannel
2269*4cfc7e60SRahul Iyer  */
2270*4cfc7e60SRahul Iyer 
2271*4cfc7e60SRahul Iyer static struct rpc_xprt_ops bc_tcp_ops = {
2272*4cfc7e60SRahul Iyer 	.reserve_xprt		= xprt_reserve_xprt,
2273*4cfc7e60SRahul Iyer 	.release_xprt		= xprt_release_xprt,
2274*4cfc7e60SRahul Iyer 	.buf_alloc		= bc_malloc,
2275*4cfc7e60SRahul Iyer 	.buf_free		= bc_free,
2276*4cfc7e60SRahul Iyer 	.send_request		= bc_send_request,
2277*4cfc7e60SRahul Iyer 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
2278*4cfc7e60SRahul Iyer 	.close			= bc_close,
2279*4cfc7e60SRahul Iyer 	.destroy		= bc_destroy,
2280*4cfc7e60SRahul Iyer 	.print_stats		= xs_tcp_print_stats,
2281*4cfc7e60SRahul Iyer };
2282*4cfc7e60SRahul Iyer 
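/*
 * Illustrative sketch only (hence #if 0): roughly what the transport setup
 * code has to do before bc_send_request() can be used. The helper name and
 * the exact wiring are hypothetical; the point is simply that the backchannel
 * rpc_xprt carries bc_tcp_ops and points at the svc_xprt it shares with the
 * fore channel.
 */
#if 0
static void bc_attach_ops_sketch(struct rpc_xprt *xprt, struct svc_xprt *bc_xprt)
{
	xprt->ops = &bc_tcp_ops;	/* page-based buffers + svc-style sends */
	xprt->bc_xprt = bc_xprt;	/* svc_xprt shared with the fore channel */
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt_set_bound(xprt);		/* the TCP connection already exists */
}
#endif
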
22833c341b0bS\"Talpey, Thomas\ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2284bc25571eS\"Talpey, Thomas\ 				      unsigned int slot_table_size)
2285c8541ecdSChuck Lever {
2286c8541ecdSChuck Lever 	struct rpc_xprt *xprt;
2287ffc2e518SChuck Lever 	struct sock_xprt *new;
2288c8541ecdSChuck Lever 
228996802a09SFrank van Maarseveen 	if (args->addrlen > sizeof(xprt->addr)) {
2290c8541ecdSChuck Lever 		dprintk("RPC:       xs_setup_xprt: address too large\n");
2291c8541ecdSChuck Lever 		return ERR_PTR(-EBADF);
2292c8541ecdSChuck Lever 	}
2293c8541ecdSChuck Lever 
2294ffc2e518SChuck Lever 	new = kzalloc(sizeof(*new), GFP_KERNEL);
2295ffc2e518SChuck Lever 	if (new == NULL) {
229646121cf7SChuck Lever 		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
229746121cf7SChuck Lever 				"rpc_xprt\n");
2298c8541ecdSChuck Lever 		return ERR_PTR(-ENOMEM);
2299c8541ecdSChuck Lever 	}
2300ffc2e518SChuck Lever 	xprt = &new->xprt;
2301c8541ecdSChuck Lever 
2302c8541ecdSChuck Lever 	xprt->max_reqs = slot_table_size;
2303c8541ecdSChuck Lever 	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
2304c8541ecdSChuck Lever 	if (xprt->slot == NULL) {
2305c8541ecdSChuck Lever 		kfree(xprt);
230646121cf7SChuck Lever 		dprintk("RPC:       xs_setup_xprt: couldn't allocate slot "
230746121cf7SChuck Lever 				"table\n");
2308c8541ecdSChuck Lever 		return ERR_PTR(-ENOMEM);
2309c8541ecdSChuck Lever 	}
2310c8541ecdSChuck Lever 
231196802a09SFrank van Maarseveen 	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
231296802a09SFrank van Maarseveen 	xprt->addrlen = args->addrlen;
2313d3bc9a1dSFrank van Maarseveen 	if (args->srcaddr)
2314fbfffbd5SChuck Lever 		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2315c8541ecdSChuck Lever 
2316c8541ecdSChuck Lever 	return xprt;
2317c8541ecdSChuck Lever }
2318c8541ecdSChuck Lever 
23192881ae74STrond Myklebust static const struct rpc_timeout xs_udp_default_timeout = {
23202881ae74STrond Myklebust 	.to_initval = 5 * HZ,
23212881ae74STrond Myklebust 	.to_maxval = 30 * HZ,
23222881ae74STrond Myklebust 	.to_increment = 5 * HZ,
23232881ae74STrond Myklebust 	.to_retries = 5,
23242881ae74STrond Myklebust };
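
/*
 * Rough meaning of the rpc_timeout fields above (and in the TCP variant
 * further down): to_initval is the first retransmit timeout, to_increment is
 * the per-retry increase used by the fixed-interval policy, to_maxval caps
 * the backoff, and to_retries bounds how many retransmissions are attempted
 * before the request sees a major timeout.
 */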
23252881ae74STrond Myklebust 
23269903cd1cSChuck Lever /**
23279903cd1cSChuck Lever  * xs_setup_udp - Set up transport to use a UDP socket
232896802a09SFrank van Maarseveen  * @args: rpc transport creation arguments
23299903cd1cSChuck Lever  *
23309903cd1cSChuck Lever  */
2331483066d6SAdrian Bunk static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2332a246b010SChuck Lever {
23338f9d5b1aSChuck Lever 	struct sockaddr *addr = args->dstaddr;
2334c8541ecdSChuck Lever 	struct rpc_xprt *xprt;
2335c8475461SChuck Lever 	struct sock_xprt *transport;
2336a246b010SChuck Lever 
233796802a09SFrank van Maarseveen 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
2338c8541ecdSChuck Lever 	if (IS_ERR(xprt))
2339c8541ecdSChuck Lever 		return xprt;
2340c8475461SChuck Lever 	transport = container_of(xprt, struct sock_xprt, xprt);
2341a246b010SChuck Lever 
2342ec739ef0SChuck Lever 	xprt->prot = IPPROTO_UDP;
2343808012fbSChuck Lever 	xprt->tsh_size = 0;
2344a246b010SChuck Lever 	/* XXX: header size can vary due to auth type, IPv6, etc. */
2345a246b010SChuck Lever 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2346a246b010SChuck Lever 
234703bf4b70SChuck Lever 	xprt->bind_timeout = XS_BIND_TO;
234803bf4b70SChuck Lever 	xprt->connect_timeout = XS_UDP_CONN_TO;
234903bf4b70SChuck Lever 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
235003bf4b70SChuck Lever 	xprt->idle_timeout = XS_IDLE_DISC_TO;
2351a246b010SChuck Lever 
2352262965f5SChuck Lever 	xprt->ops = &xs_udp_ops;
2353a246b010SChuck Lever 
2354ba7392bbSTrond Myklebust 	xprt->timeout = &xs_udp_default_timeout;
2355a246b010SChuck Lever 
23568f9d5b1aSChuck Lever 	switch (addr->sa_family) {
23578f9d5b1aSChuck Lever 	case AF_INET:
23588f9d5b1aSChuck Lever 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
23598f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
23608f9d5b1aSChuck Lever 
23618f9d5b1aSChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
23628f9d5b1aSChuck Lever 					xs_udp_connect_worker4);
23639dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
23648f9d5b1aSChuck Lever 		break;
23658f9d5b1aSChuck Lever 	case AF_INET6:
23668f9d5b1aSChuck Lever 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
23678f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
23688f9d5b1aSChuck Lever 
23698f9d5b1aSChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
23708f9d5b1aSChuck Lever 					xs_udp_connect_worker6);
23719dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
23728f9d5b1aSChuck Lever 		break;
23738f9d5b1aSChuck Lever 	default:
23748f9d5b1aSChuck Lever 		kfree(xprt);
23758f9d5b1aSChuck Lever 		return ERR_PTR(-EAFNOSUPPORT);
23768f9d5b1aSChuck Lever 	}
23778f9d5b1aSChuck Lever 
2378c740eff8SChuck Lever 	if (xprt_bound(xprt))
2379c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2380c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2381c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PORT],
2382c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2383c740eff8SChuck Lever 	else
2384c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2385c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2386c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2387edb267a6SChuck Lever 
2388bc25571eS\"Talpey, Thomas\ 	if (try_module_get(THIS_MODULE))
2389c8541ecdSChuck Lever 		return xprt;
2390bc25571eS\"Talpey, Thomas\ 
2391bc25571eS\"Talpey, Thomas\ 	kfree(xprt->slot);
2392bc25571eS\"Talpey, Thomas\ 	kfree(xprt);
2393bc25571eS\"Talpey, Thomas\ 	return ERR_PTR(-EINVAL);
2394a246b010SChuck Lever }
2395a246b010SChuck Lever 
23962881ae74STrond Myklebust static const struct rpc_timeout xs_tcp_default_timeout = {
23972881ae74STrond Myklebust 	.to_initval = 60 * HZ,
23982881ae74STrond Myklebust 	.to_maxval = 60 * HZ,
23992881ae74STrond Myklebust 	.to_retries = 2,
24002881ae74STrond Myklebust };
24012881ae74STrond Myklebust 
24029903cd1cSChuck Lever /**
24039903cd1cSChuck Lever  * xs_setup_tcp - Set up transport to use a TCP socket
240496802a09SFrank van Maarseveen  * @args: rpc transport creation arguments
24059903cd1cSChuck Lever  *
24069903cd1cSChuck Lever  */
2407483066d6SAdrian Bunk static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2408a246b010SChuck Lever {
24098f9d5b1aSChuck Lever 	struct sockaddr *addr = args->dstaddr;
2410c8541ecdSChuck Lever 	struct rpc_xprt *xprt;
2411c8475461SChuck Lever 	struct sock_xprt *transport;
2412a246b010SChuck Lever 
241396802a09SFrank van Maarseveen 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2414c8541ecdSChuck Lever 	if (IS_ERR(xprt))
2415c8541ecdSChuck Lever 		return xprt;
2416c8475461SChuck Lever 	transport = container_of(xprt, struct sock_xprt, xprt);
2417a246b010SChuck Lever 
2418ec739ef0SChuck Lever 	xprt->prot = IPPROTO_TCP;
2419808012fbSChuck Lever 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2420808012fbSChuck Lever 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2421a246b010SChuck Lever 
242203bf4b70SChuck Lever 	xprt->bind_timeout = XS_BIND_TO;
242303bf4b70SChuck Lever 	xprt->connect_timeout = XS_TCP_CONN_TO;
242403bf4b70SChuck Lever 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
242503bf4b70SChuck Lever 	xprt->idle_timeout = XS_IDLE_DISC_TO;
2426a246b010SChuck Lever 
2427262965f5SChuck Lever 	xprt->ops = &xs_tcp_ops;
2428ba7392bbSTrond Myklebust 	xprt->timeout = &xs_tcp_default_timeout;
2429a246b010SChuck Lever 
24308f9d5b1aSChuck Lever 	switch (addr->sa_family) {
24318f9d5b1aSChuck Lever 	case AF_INET:
24328f9d5b1aSChuck Lever 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
24338f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
24348f9d5b1aSChuck Lever 
24359dc3b095SChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
24369dc3b095SChuck Lever 					xs_tcp_connect_worker4);
24379dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
24388f9d5b1aSChuck Lever 		break;
24398f9d5b1aSChuck Lever 	case AF_INET6:
24408f9d5b1aSChuck Lever 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
24418f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
24428f9d5b1aSChuck Lever 
24439dc3b095SChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
24449dc3b095SChuck Lever 					xs_tcp_connect_worker6);
24459dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
24468f9d5b1aSChuck Lever 		break;
24478f9d5b1aSChuck Lever 	default:
24488f9d5b1aSChuck Lever 		kfree(xprt);
24498f9d5b1aSChuck Lever 		return ERR_PTR(-EAFNOSUPPORT);
24508f9d5b1aSChuck Lever 	}
24518f9d5b1aSChuck Lever 
2452c740eff8SChuck Lever 	if (xprt_bound(xprt))
2453c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2454c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2455c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PORT],
2456c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2457c740eff8SChuck Lever 	else
2458c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2459c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2460c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2461c740eff8SChuck Lever 
2462edb267a6SChuck Lever 
2463bc25571eS\"Talpey, Thomas\ 	if (try_module_get(THIS_MODULE))
2464c8541ecdSChuck Lever 		return xprt;
2465bc25571eS\"Talpey, Thomas\ 
2466bc25571eS\"Talpey, Thomas\ 	kfree(xprt->slot);
2467bc25571eS\"Talpey, Thomas\ 	kfree(xprt);
2468bc25571eS\"Talpey, Thomas\ 	return ERR_PTR(-EINVAL);
2469a246b010SChuck Lever }
2470282b32e1SChuck Lever 
2471bc25571eS\"Talpey, Thomas\ static struct xprt_class	xs_udp_transport = {
2472bc25571eS\"Talpey, Thomas\ 	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
2473bc25571eS\"Talpey, Thomas\ 	.name		= "udp",
2474bc25571eS\"Talpey, Thomas\ 	.owner		= THIS_MODULE,
24754fa016ebS\"Talpey, Thomas\ 	.ident		= IPPROTO_UDP,
2476bc25571eS\"Talpey, Thomas\ 	.setup		= xs_setup_udp,
2477bc25571eS\"Talpey, Thomas\ };
2478bc25571eS\"Talpey, Thomas\ 
2479bc25571eS\"Talpey, Thomas\ static struct xprt_class	xs_tcp_transport = {
2480bc25571eS\"Talpey, Thomas\ 	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
2481bc25571eS\"Talpey, Thomas\ 	.name		= "tcp",
2482bc25571eS\"Talpey, Thomas\ 	.owner		= THIS_MODULE,
24834fa016ebS\"Talpey, Thomas\ 	.ident		= IPPROTO_TCP,
2484bc25571eS\"Talpey, Thomas\ 	.setup		= xs_setup_tcp,
2485bc25571eS\"Talpey, Thomas\ };
2486bc25571eS\"Talpey, Thomas\ 
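/*
 * A note on how these classes are used (sketch; see xprt_create_transport()
 * in net/sunrpc/xprt.c): rpc_create() passes the requested protocol as
 * args->ident, the list of registered xprt_class entries is searched for a
 * matching .ident (IPPROTO_UDP or IPPROTO_TCP here), and that class's .setup
 * routine (xs_setup_udp or xs_setup_tcp) builds the rpc_xprt.
 */
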
2487282b32e1SChuck Lever /**
2488bc25571eS\"Talpey, Thomas\  * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
2489282b32e1SChuck Lever  *
2490282b32e1SChuck Lever  */
2491282b32e1SChuck Lever int init_socket_xprt(void)
2492282b32e1SChuck Lever {
2493fbf76683SChuck Lever #ifdef RPC_DEBUG
24942b1bec5fSEric W. Biederman 	if (!sunrpc_table_header)
24950b4d4147SEric W. Biederman 		sunrpc_table_header = register_sysctl_table(sunrpc_table);
2496fbf76683SChuck Lever #endif
2497fbf76683SChuck Lever 
2498bc25571eS\"Talpey, Thomas\ 	xprt_register_transport(&xs_udp_transport);
2499bc25571eS\"Talpey, Thomas\ 	xprt_register_transport(&xs_tcp_transport);
2500bc25571eS\"Talpey, Thomas\ 
2501282b32e1SChuck Lever 	return 0;
2502282b32e1SChuck Lever }
2503282b32e1SChuck Lever 
2504282b32e1SChuck Lever /**
2505bc25571eS\"Talpey, Thomas\  * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
2506282b32e1SChuck Lever  *
2507282b32e1SChuck Lever  */
2508282b32e1SChuck Lever void cleanup_socket_xprt(void)
2509282b32e1SChuck Lever {
2510fbf76683SChuck Lever #ifdef RPC_DEBUG
2511fbf76683SChuck Lever 	if (sunrpc_table_header) {
2512fbf76683SChuck Lever 		unregister_sysctl_table(sunrpc_table_header);
2513fbf76683SChuck Lever 		sunrpc_table_header = NULL;
2514fbf76683SChuck Lever 	}
2515fbf76683SChuck Lever #endif
2516bc25571eS\"Talpey, Thomas\ 
2517bc25571eS\"Talpey, Thomas\ 	xprt_unregister_transport(&xs_udp_transport);
2518bc25571eS\"Talpey, Thomas\ 	xprt_unregister_transport(&xs_tcp_transport);
2519282b32e1SChuck Lever }
2520cbf11071STrond Myklebust 
2521cbf11071STrond Myklebust static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
2522cbf11071STrond Myklebust 		unsigned int min, unsigned int max)
2523cbf11071STrond Myklebust {
2524cbf11071STrond Myklebust 	unsigned long num;
2525cbf11071STrond Myklebust 	int ret;
2526cbf11071STrond Myklebust 
2527cbf11071STrond Myklebust 	if (!val)
2528cbf11071STrond Myklebust 		return -EINVAL;
2529cbf11071STrond Myklebust 	ret = strict_strtoul(val, 0, &num);
2530cbf11071STrond Myklebust 	if (ret == -EINVAL || num < min || num > max)
2531cbf11071STrond Myklebust 		return -EINVAL;
2532cbf11071STrond Myklebust 	*((unsigned int *)kp->arg) = num;
2533cbf11071STrond Myklebust 	return 0;
2534cbf11071STrond Myklebust }
2535cbf11071STrond Myklebust 
2536cbf11071STrond Myklebust static int param_set_portnr(const char *val, struct kernel_param *kp)
2537cbf11071STrond Myklebust {
2538cbf11071STrond Myklebust 	return param_set_uint_minmax(val, kp,
2539cbf11071STrond Myklebust 			RPC_MIN_RESVPORT,
2540cbf11071STrond Myklebust 			RPC_MAX_RESVPORT);
2541cbf11071STrond Myklebust }
2542cbf11071STrond Myklebust 
2543cbf11071STrond Myklebust static int param_get_portnr(char *buffer, struct kernel_param *kp)
2544cbf11071STrond Myklebust {
2545cbf11071STrond Myklebust 	return param_get_uint(buffer, kp);
2546cbf11071STrond Myklebust }
2547cbf11071STrond Myklebust #define param_check_portnr(name, p) \
2548cbf11071STrond Myklebust 	__param_check(name, p, unsigned int);
2549cbf11071STrond Myklebust 
2550cbf11071STrond Myklebust module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
2551cbf11071STrond Myklebust module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
2552cbf11071STrond Myklebust 
2553cbf11071STrond Myklebust static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
2554cbf11071STrond Myklebust {
2555cbf11071STrond Myklebust 	return param_set_uint_minmax(val, kp,
2556cbf11071STrond Myklebust 			RPC_MIN_SLOT_TABLE,
2557cbf11071STrond Myklebust 			RPC_MAX_SLOT_TABLE);
2558cbf11071STrond Myklebust }
2559cbf11071STrond Myklebust 
2560cbf11071STrond Myklebust static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
2561cbf11071STrond Myklebust {
2562cbf11071STrond Myklebust 	return param_get_uint(buffer, kp);
2563cbf11071STrond Myklebust }
2564cbf11071STrond Myklebust #define param_check_slot_table_size(name, p) \
2565cbf11071STrond Myklebust 	__param_check(name, p, unsigned int);
2566cbf11071STrond Myklebust 
2567cbf11071STrond Myklebust module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
2568cbf11071STrond Myklebust 		   slot_table_size, 0644);
2569cbf11071STrond Myklebust module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
2570cbf11071STrond Myklebust 		   slot_table_size, 0644);
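
/*
 * These knobs surface as parameters of the sunrpc module, e.g.
 * /sys/module/sunrpc/parameters/tcp_slot_table_entries (mode 0644), and can
 * be set at load time ("modprobe sunrpc tcp_slot_table_entries=32") or, when
 * sunrpc is built in, on the kernel command line
 * ("sunrpc.tcp_slot_table_entries=32"). Out-of-range values are rejected by
 * the param_set_* handlers above.
 */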
2571cbf11071STrond Myklebust 
2572