xref: /openbmc/linux/net/sunrpc/xprtsock.c (revision 0b9e79431377df452348e78262dd5a3dc359eeef)
1a246b010SChuck Lever /*
2a246b010SChuck Lever  * linux/net/sunrpc/xprtsock.c
3a246b010SChuck Lever  *
4a246b010SChuck Lever  * Client-side transport implementation for sockets.
5a246b010SChuck Lever  *
6113aa838SAlan Cox  * TCP callback races fixes (C) 1998 Red Hat
7113aa838SAlan Cox  * TCP send fixes (C) 1998 Red Hat
8a246b010SChuck Lever  * TCP NFS related read + write fixes
9a246b010SChuck Lever  *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10a246b010SChuck Lever  *
11a246b010SChuck Lever  * Rewrite of large parts of the code in order to stabilize TCP stuff.
12a246b010SChuck Lever  * Fix behaviour when socket buffer is full.
13a246b010SChuck Lever  *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
1455aa4f58SChuck Lever  *
1555aa4f58SChuck Lever  * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
168f9d5b1aSChuck Lever  *
178f9d5b1aSChuck Lever  * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
188f9d5b1aSChuck Lever  *   <gilles.quillard@bull.net>
19a246b010SChuck Lever  */
20a246b010SChuck Lever 
21a246b010SChuck Lever #include <linux/types.h>
22a246b010SChuck Lever #include <linux/slab.h>
23bc25571eS\"Talpey, Thomas\ #include <linux/module.h>
24a246b010SChuck Lever #include <linux/capability.h>
25a246b010SChuck Lever #include <linux/pagemap.h>
26a246b010SChuck Lever #include <linux/errno.h>
27a246b010SChuck Lever #include <linux/socket.h>
28a246b010SChuck Lever #include <linux/in.h>
29a246b010SChuck Lever #include <linux/net.h>
30a246b010SChuck Lever #include <linux/mm.h>
31a246b010SChuck Lever #include <linux/udp.h>
32a246b010SChuck Lever #include <linux/tcp.h>
33a246b010SChuck Lever #include <linux/sunrpc/clnt.h>
3402107148SChuck Lever #include <linux/sunrpc/sched.h>
354cfc7e60SRahul Iyer #include <linux/sunrpc/svcsock.h>
3649c36fccS\"Talpey, Thomas\ #include <linux/sunrpc/xprtsock.h>
37a246b010SChuck Lever #include <linux/file.h>
3844b98efdSRicardo Labiaga #ifdef CONFIG_NFS_V4_1
3944b98efdSRicardo Labiaga #include <linux/sunrpc/bc_xprt.h>
4044b98efdSRicardo Labiaga #endif
41a246b010SChuck Lever 
42a246b010SChuck Lever #include <net/sock.h>
43a246b010SChuck Lever #include <net/checksum.h>
44a246b010SChuck Lever #include <net/udp.h>
45a246b010SChuck Lever #include <net/tcp.h>
46a246b010SChuck Lever 
474cfc7e60SRahul Iyer #include "sunrpc.h"
489903cd1cSChuck Lever /*
49c556b754SChuck Lever  * xprtsock tunables
50c556b754SChuck Lever  */
51c556b754SChuck Lever unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
52c556b754SChuck Lever unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
53c556b754SChuck Lever 
54c556b754SChuck Lever unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
55c556b754SChuck Lever unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
56c556b754SChuck Lever 
577d1e8255STrond Myklebust #define XS_TCP_LINGER_TO	(15U * HZ)
5825fe6142STrond Myklebust static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
597d1e8255STrond Myklebust 
60c556b754SChuck Lever /*
61fbf76683SChuck Lever  * We can register our own files under /proc/sys/sunrpc by
62fbf76683SChuck Lever  * calling register_sysctl_table() again.  The files in that
63fbf76683SChuck Lever  * directory become the union of all files registered there.
64fbf76683SChuck Lever  *
65fbf76683SChuck Lever  * We simply need to make sure that we don't collide with
66fbf76683SChuck Lever  * someone else's file names!
67fbf76683SChuck Lever  */
68fbf76683SChuck Lever 
69fbf76683SChuck Lever #ifdef RPC_DEBUG
70fbf76683SChuck Lever 
71fbf76683SChuck Lever static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
72fbf76683SChuck Lever static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
73fbf76683SChuck Lever static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
74fbf76683SChuck Lever static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
75fbf76683SChuck Lever 
76fbf76683SChuck Lever static struct ctl_table_header *sunrpc_table_header;
77fbf76683SChuck Lever 
78fbf76683SChuck Lever /*
79fbf76683SChuck Lever  * FIXME: changing the UDP slot table size should also resize the UDP
80fbf76683SChuck Lever  *        socket buffers for existing UDP transports
81fbf76683SChuck Lever  */
82fbf76683SChuck Lever static ctl_table xs_tunables_table[] = {
83fbf76683SChuck Lever 	{
84fbf76683SChuck Lever 		.procname	= "udp_slot_table_entries",
85fbf76683SChuck Lever 		.data		= &xprt_udp_slot_table_entries,
86fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
87fbf76683SChuck Lever 		.mode		= 0644,
886d456111SEric W. Biederman 		.proc_handler	= proc_dointvec_minmax,
89fbf76683SChuck Lever 		.extra1		= &min_slot_table_size,
90fbf76683SChuck Lever 		.extra2		= &max_slot_table_size
91fbf76683SChuck Lever 	},
92fbf76683SChuck Lever 	{
93fbf76683SChuck Lever 		.procname	= "tcp_slot_table_entries",
94fbf76683SChuck Lever 		.data		= &xprt_tcp_slot_table_entries,
95fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
96fbf76683SChuck Lever 		.mode		= 0644,
976d456111SEric W. Biederman 		.proc_handler	= proc_dointvec_minmax,
98fbf76683SChuck Lever 		.extra1		= &min_slot_table_size,
99fbf76683SChuck Lever 		.extra2		= &max_slot_table_size
100fbf76683SChuck Lever 	},
101fbf76683SChuck Lever 	{
102fbf76683SChuck Lever 		.procname	= "min_resvport",
103fbf76683SChuck Lever 		.data		= &xprt_min_resvport,
104fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
105fbf76683SChuck Lever 		.mode		= 0644,
1066d456111SEric W. Biederman 		.proc_handler	= proc_dointvec_minmax,
107fbf76683SChuck Lever 		.extra1		= &xprt_min_resvport_limit,
108fbf76683SChuck Lever 		.extra2		= &xprt_max_resvport_limit
109fbf76683SChuck Lever 	},
110fbf76683SChuck Lever 	{
111fbf76683SChuck Lever 		.procname	= "max_resvport",
112fbf76683SChuck Lever 		.data		= &xprt_max_resvport,
113fbf76683SChuck Lever 		.maxlen		= sizeof(unsigned int),
114fbf76683SChuck Lever 		.mode		= 0644,
1156d456111SEric W. Biederman 		.proc_handler	= proc_dointvec_minmax,
116fbf76683SChuck Lever 		.extra1		= &xprt_min_resvport_limit,
117fbf76683SChuck Lever 		.extra2		= &xprt_max_resvport_limit
118fbf76683SChuck Lever 	},
119fbf76683SChuck Lever 	{
12025fe6142STrond Myklebust 		.procname	= "tcp_fin_timeout",
12125fe6142STrond Myklebust 		.data		= &xs_tcp_fin_timeout,
12225fe6142STrond Myklebust 		.maxlen		= sizeof(xs_tcp_fin_timeout),
12325fe6142STrond Myklebust 		.mode		= 0644,
1246d456111SEric W. Biederman 		.proc_handler	= proc_dointvec_jiffies,
12525fe6142STrond Myklebust 	},
126f8572d8fSEric W. Biederman 	{ },
127fbf76683SChuck Lever };
128fbf76683SChuck Lever 
129fbf76683SChuck Lever static ctl_table sunrpc_table[] = {
130fbf76683SChuck Lever 	{
131fbf76683SChuck Lever 		.procname	= "sunrpc",
132fbf76683SChuck Lever 		.mode		= 0555,
133fbf76683SChuck Lever 		.child		= xs_tunables_table
134fbf76683SChuck Lever 	},
135f8572d8fSEric W. Biederman 	{ },
136fbf76683SChuck Lever };
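/*
 * Illustrative sketch only (the registration call sites are not shown at
 * this point in the listing): the two ctl_table arrays above are assumed
 * to be hooked up from the transport module's init/exit paths, roughly:
 *
 *	sunrpc_table_header = register_sysctl_table(sunrpc_table);
 *	...
 *	if (sunrpc_table_header) {
 *		unregister_sysctl_table(sunrpc_table_header);
 *		sunrpc_table_header = NULL;
 *	}
 *
 * which makes the tunables appear as /proc/sys/sunrpc/udp_slot_table_entries,
 * tcp_slot_table_entries, min_resvport, max_resvport and tcp_fin_timeout.
 */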
137fbf76683SChuck Lever 
138fbf76683SChuck Lever #endif
139fbf76683SChuck Lever 
140fbf76683SChuck Lever /*
14103bf4b70SChuck Lever  * Time out for an RPC UDP socket connect.  UDP socket connects are
14203bf4b70SChuck Lever  * synchronous, but we set a timeout anyway in case of resource
14303bf4b70SChuck Lever  * exhaustion on the local host.
14403bf4b70SChuck Lever  */
14503bf4b70SChuck Lever #define XS_UDP_CONN_TO		(5U * HZ)
14603bf4b70SChuck Lever 
14703bf4b70SChuck Lever /*
14803bf4b70SChuck Lever  * Wait duration for an RPC TCP connection to be established.  Solaris
14903bf4b70SChuck Lever  * NFS over TCP uses 60 seconds, for example, which is in line with how
15003bf4b70SChuck Lever  * long a server takes to reboot.
15103bf4b70SChuck Lever  */
15203bf4b70SChuck Lever #define XS_TCP_CONN_TO		(60U * HZ)
15303bf4b70SChuck Lever 
15403bf4b70SChuck Lever /*
15503bf4b70SChuck Lever  * Wait duration for a reply from the RPC portmapper.
15603bf4b70SChuck Lever  */
15703bf4b70SChuck Lever #define XS_BIND_TO		(60U * HZ)
15803bf4b70SChuck Lever 
15903bf4b70SChuck Lever /*
16003bf4b70SChuck Lever  * Delay if a UDP socket connect error occurs.  This is most likely some
16103bf4b70SChuck Lever  * kind of resource problem on the local host.
16203bf4b70SChuck Lever  */
16303bf4b70SChuck Lever #define XS_UDP_REEST_TO		(2U * HZ)
16403bf4b70SChuck Lever 
16503bf4b70SChuck Lever /*
16603bf4b70SChuck Lever  * The reestablish timeout allows clients to delay for a bit before attempting
16703bf4b70SChuck Lever  * to reconnect to a server that just dropped our connection.
16803bf4b70SChuck Lever  *
16903bf4b70SChuck Lever  * We implement an exponential backoff when trying to reestablish a TCP
17003bf4b70SChuck Lever  * transport connection with the server.  Some servers like to drop a TCP
17103bf4b70SChuck Lever  * connection when they are overworked, so we start with a short timeout and
17203bf4b70SChuck Lever  * increase over time if the server is down or not responding.
17303bf4b70SChuck Lever  */
17403bf4b70SChuck Lever #define XS_TCP_INIT_REEST_TO	(3U * HZ)
17503bf4b70SChuck Lever #define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
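/*
 * Hedged sketch of how the two values above are used (the authoritative
 * logic lives in the TCP connect path later in this file): each failed
 * reconnect attempt roughly doubles the delay and clamps it, e.g.
 *
 *	xprt->reestablish_timeout <<= 1;
 *	if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
 *		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 *
 * so the backoff runs 3s, 6s, 12s, ... capped at five minutes.
 */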
17603bf4b70SChuck Lever 
17703bf4b70SChuck Lever /*
17803bf4b70SChuck Lever  * TCP idle timeout; client drops the transport socket if it is idle
17903bf4b70SChuck Lever  * for this long.  Note that we also timeout UDP sockets to prevent
18003bf4b70SChuck Lever  * for this long.  Note that we also time out UDP sockets to prevent
18103bf4b70SChuck Lever  */
18203bf4b70SChuck Lever #define XS_IDLE_DISC_TO		(5U * 60 * HZ)
18303bf4b70SChuck Lever 
184a246b010SChuck Lever #ifdef RPC_DEBUG
185a246b010SChuck Lever # undef  RPC_DEBUG_DATA
1869903cd1cSChuck Lever # define RPCDBG_FACILITY	RPCDBG_TRANS
187a246b010SChuck Lever #endif
188a246b010SChuck Lever 
189a246b010SChuck Lever #ifdef RPC_DEBUG_DATA
1909903cd1cSChuck Lever static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
191a246b010SChuck Lever {
192a246b010SChuck Lever 	u8 *buf = (u8 *) packet;
193a246b010SChuck Lever 	int j;
194a246b010SChuck Lever 
195a246b010SChuck Lever 	dprintk("RPC:       %s\n", msg);
196a246b010SChuck Lever 	for (j = 0; j < count && j < 128; j += 4) {
197a246b010SChuck Lever 		if (!(j & 31)) {
198a246b010SChuck Lever 			if (j)
199a246b010SChuck Lever 				dprintk("\n");
200a246b010SChuck Lever 			dprintk("0x%04x ", j);
201a246b010SChuck Lever 		}
202a246b010SChuck Lever 		dprintk("%02x%02x%02x%02x ",
203a246b010SChuck Lever 			buf[j], buf[j+1], buf[j+2], buf[j+3]);
204a246b010SChuck Lever 	}
205a246b010SChuck Lever 	dprintk("\n");
206a246b010SChuck Lever }
207a246b010SChuck Lever #else
2089903cd1cSChuck Lever static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
209a246b010SChuck Lever {
210a246b010SChuck Lever 	/* NOP */
211a246b010SChuck Lever }
212a246b010SChuck Lever #endif
213a246b010SChuck Lever 
214ffc2e518SChuck Lever struct sock_xprt {
215ffc2e518SChuck Lever 	struct rpc_xprt		xprt;
216ee0ac0c2SChuck Lever 
217ee0ac0c2SChuck Lever 	/*
218ee0ac0c2SChuck Lever 	 * Network layer
219ee0ac0c2SChuck Lever 	 */
220ee0ac0c2SChuck Lever 	struct socket *		sock;
221ee0ac0c2SChuck Lever 	struct sock *		inet;
22251971139SChuck Lever 
22351971139SChuck Lever 	/*
22451971139SChuck Lever 	 * State of TCP reply receive
22551971139SChuck Lever 	 */
22651971139SChuck Lever 	__be32			tcp_fraghdr,
22751971139SChuck Lever 				tcp_xid;
22851971139SChuck Lever 
22951971139SChuck Lever 	u32			tcp_offset,
23051971139SChuck Lever 				tcp_reclen;
23151971139SChuck Lever 
23251971139SChuck Lever 	unsigned long		tcp_copied,
23351971139SChuck Lever 				tcp_flags;
234c8475461SChuck Lever 
235c8475461SChuck Lever 	/*
236c8475461SChuck Lever 	 * Connection of transports
237c8475461SChuck Lever 	 */
23834161db6STrond Myklebust 	struct delayed_work	connect_worker;
239fbfffbd5SChuck Lever 	struct sockaddr_storage	srcaddr;
240fbfffbd5SChuck Lever 	unsigned short		srcport;
2417c6e066eSChuck Lever 
2427c6e066eSChuck Lever 	/*
2437c6e066eSChuck Lever 	 * UDP socket buffer size parameters
2447c6e066eSChuck Lever 	 */
2457c6e066eSChuck Lever 	size_t			rcvsize,
2467c6e066eSChuck Lever 				sndsize;
247314dfd79SChuck Lever 
248314dfd79SChuck Lever 	/*
249314dfd79SChuck Lever 	 * Saved socket callback addresses
250314dfd79SChuck Lever 	 */
251314dfd79SChuck Lever 	void			(*old_data_ready)(struct sock *, int);
252314dfd79SChuck Lever 	void			(*old_state_change)(struct sock *);
253314dfd79SChuck Lever 	void			(*old_write_space)(struct sock *);
2542a9e1cfaSTrond Myklebust 	void			(*old_error_report)(struct sock *);
255ffc2e518SChuck Lever };
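/*
 * Hedged usage note: the routines below recover the sock_xprt from a
 * generic rpc_xprt pointer with container_of(), e.g.
 *
 *	struct sock_xprt *transport =
 *			container_of(xprt, struct sock_xprt, xprt);
 */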
256ffc2e518SChuck Lever 
257e136d092SChuck Lever /*
258e136d092SChuck Lever  * TCP receive state flags
259e136d092SChuck Lever  */
260e136d092SChuck Lever #define TCP_RCV_LAST_FRAG	(1UL << 0)
261e136d092SChuck Lever #define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
262e136d092SChuck Lever #define TCP_RCV_COPY_XID	(1UL << 2)
263e136d092SChuck Lever #define TCP_RCV_COPY_DATA	(1UL << 3)
264f4a2e418SRicardo Labiaga #define TCP_RCV_READ_CALLDIR	(1UL << 4)
265f4a2e418SRicardo Labiaga #define TCP_RCV_COPY_CALLDIR	(1UL << 5)
26618dca02aSRicardo Labiaga 
26718dca02aSRicardo Labiaga /*
26818dca02aSRicardo Labiaga  * TCP RPC flags
26918dca02aSRicardo Labiaga  */
270f4a2e418SRicardo Labiaga #define TCP_RPC_REPLY		(1UL << 6)
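/*
 * Rough flag progression for one received TCP record (illustrative only;
 * the authoritative transitions are in the xs_tcp_read_* helpers below):
 *
 *	TCP_RCV_COPY_FRAGHDR	read the 4-byte record marker
 *	TCP_RCV_COPY_XID	read the 4-byte XID
 *	TCP_RCV_READ_CALLDIR /
 *	TCP_RCV_COPY_CALLDIR	(NFSv4.1) read and stash the call/reply word
 *	TCP_RCV_COPY_DATA	copy payload into the matched request buffer
 *	TCP_RCV_LAST_FRAG	set while the current fragment is the last one
 */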
271e136d092SChuck Lever 
27295392c59SChuck Lever static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
273edb267a6SChuck Lever {
27495392c59SChuck Lever 	return (struct sockaddr *) &xprt->addr;
27595392c59SChuck Lever }
27695392c59SChuck Lever 
27795392c59SChuck Lever static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
27895392c59SChuck Lever {
27995392c59SChuck Lever 	return (struct sockaddr_in *) &xprt->addr;
28095392c59SChuck Lever }
28195392c59SChuck Lever 
28295392c59SChuck Lever static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
28395392c59SChuck Lever {
28495392c59SChuck Lever 	return (struct sockaddr_in6 *) &xprt->addr;
28595392c59SChuck Lever }
28695392c59SChuck Lever 
287c877b849SChuck Lever static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
288c877b849SChuck Lever {
289c877b849SChuck Lever 	struct sockaddr *sap = xs_addr(xprt);
2909dc3b095SChuck Lever 	struct sockaddr_in6 *sin6;
2919dc3b095SChuck Lever 	struct sockaddr_in *sin;
292c877b849SChuck Lever 	char buf[128];
293c877b849SChuck Lever 
294c877b849SChuck Lever 	(void)rpc_ntop(sap, buf, sizeof(buf));
295c877b849SChuck Lever 	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
296c877b849SChuck Lever 
2979dc3b095SChuck Lever 	switch (sap->sa_family) {
2989dc3b095SChuck Lever 	case AF_INET:
2999dc3b095SChuck Lever 		sin = xs_addr_in(xprt);
300fc0b5791SJoe Perches 		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
3019dc3b095SChuck Lever 		break;
3029dc3b095SChuck Lever 	case AF_INET6:
3039dc3b095SChuck Lever 		sin6 = xs_addr_in6(xprt);
304fc0b5791SJoe Perches 		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
3059dc3b095SChuck Lever 		break;
3069dc3b095SChuck Lever 	default:
3079dc3b095SChuck Lever 		BUG();
3089dc3b095SChuck Lever 	}
3099dc3b095SChuck Lever 	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
3109dc3b095SChuck Lever }
3119dc3b095SChuck Lever 
3129dc3b095SChuck Lever static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
3139dc3b095SChuck Lever {
3149dc3b095SChuck Lever 	struct sockaddr *sap = xs_addr(xprt);
3159dc3b095SChuck Lever 	char buf[128];
3169dc3b095SChuck Lever 
31781160e66SJoe Perches 	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
318c877b849SChuck Lever 	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
319c877b849SChuck Lever 
32081160e66SJoe Perches 	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
321c877b849SChuck Lever 	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
322c877b849SChuck Lever }
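/*
 * Example of the strings the two helpers above produce for a hypothetical
 * IPv4 peer 192.168.0.10, port 2049:
 *
 *	RPC_DISPLAY_ADDR	"192.168.0.10"
 *	RPC_DISPLAY_HEX_ADDR	"c0a8000a"
 *	RPC_DISPLAY_PORT	"2049"
 *	RPC_DISPLAY_HEX_PORT	" 801"	(note the "%4hx" width padding)
 */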
323c877b849SChuck Lever 
3249dc3b095SChuck Lever static void xs_format_peer_addresses(struct rpc_xprt *xprt,
325b454ae90SChuck Lever 				     const char *protocol,
326b454ae90SChuck Lever 				     const char *netid)
327edb267a6SChuck Lever {
328b454ae90SChuck Lever 	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
329b454ae90SChuck Lever 	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
330c877b849SChuck Lever 	xs_format_common_peer_addresses(xprt);
3319dc3b095SChuck Lever 	xs_format_common_peer_ports(xprt);
332edb267a6SChuck Lever }
333edb267a6SChuck Lever 
3349dc3b095SChuck Lever static void xs_update_peer_port(struct rpc_xprt *xprt)
3354b6473fbSChuck Lever {
3369dc3b095SChuck Lever 	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
3379dc3b095SChuck Lever 	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
3384b6473fbSChuck Lever 
3399dc3b095SChuck Lever 	xs_format_common_peer_ports(xprt);
340edb267a6SChuck Lever }
341edb267a6SChuck Lever 
342edb267a6SChuck Lever static void xs_free_peer_addresses(struct rpc_xprt *xprt)
343edb267a6SChuck Lever {
34433e01dc7SChuck Lever 	unsigned int i;
34533e01dc7SChuck Lever 
34633e01dc7SChuck Lever 	for (i = 0; i < RPC_DISPLAY_MAX; i++)
34733e01dc7SChuck Lever 		switch (i) {
34833e01dc7SChuck Lever 		case RPC_DISPLAY_PROTO:
34933e01dc7SChuck Lever 		case RPC_DISPLAY_NETID:
35033e01dc7SChuck Lever 			continue;
35133e01dc7SChuck Lever 		default:
35233e01dc7SChuck Lever 			kfree(xprt->address_strings[i]);
35333e01dc7SChuck Lever 		}
354edb267a6SChuck Lever }
355edb267a6SChuck Lever 
356b4b5cc85SChuck Lever #define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
357b4b5cc85SChuck Lever 
35824c5684bSTrond Myklebust static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
359b4b5cc85SChuck Lever {
360b4b5cc85SChuck Lever 	struct msghdr msg = {
361b4b5cc85SChuck Lever 		.msg_name	= addr,
362b4b5cc85SChuck Lever 		.msg_namelen	= addrlen,
36324c5684bSTrond Myklebust 		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
36424c5684bSTrond Myklebust 	};
36524c5684bSTrond Myklebust 	struct kvec iov = {
36624c5684bSTrond Myklebust 		.iov_base	= vec->iov_base + base,
36724c5684bSTrond Myklebust 		.iov_len	= vec->iov_len - base,
368b4b5cc85SChuck Lever 	};
369b4b5cc85SChuck Lever 
37024c5684bSTrond Myklebust 	if (iov.iov_len != 0)
371b4b5cc85SChuck Lever 		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
372b4b5cc85SChuck Lever 	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
373b4b5cc85SChuck Lever }
374b4b5cc85SChuck Lever 
37524c5684bSTrond Myklebust static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
376b4b5cc85SChuck Lever {
37724c5684bSTrond Myklebust 	struct page **ppage;
37824c5684bSTrond Myklebust 	unsigned int remainder;
37924c5684bSTrond Myklebust 	int err, sent = 0;
380b4b5cc85SChuck Lever 
38124c5684bSTrond Myklebust 	remainder = xdr->page_len - base;
38224c5684bSTrond Myklebust 	base += xdr->page_base;
38324c5684bSTrond Myklebust 	ppage = xdr->pages + (base >> PAGE_SHIFT);
38424c5684bSTrond Myklebust 	base &= ~PAGE_MASK;
38524c5684bSTrond Myklebust 	for(;;) {
38624c5684bSTrond Myklebust 		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
38724c5684bSTrond Myklebust 		int flags = XS_SENDMSG_FLAGS;
38824c5684bSTrond Myklebust 
38924c5684bSTrond Myklebust 		remainder -= len;
39024c5684bSTrond Myklebust 		if (remainder != 0 || more)
39124c5684bSTrond Myklebust 			flags |= MSG_MORE;
39224c5684bSTrond Myklebust 		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
39324c5684bSTrond Myklebust 		if (remainder == 0 || err != len)
39424c5684bSTrond Myklebust 			break;
39524c5684bSTrond Myklebust 		sent += err;
39624c5684bSTrond Myklebust 		ppage++;
39724c5684bSTrond Myklebust 		base = 0;
39824c5684bSTrond Myklebust 	}
39924c5684bSTrond Myklebust 	if (sent == 0)
40024c5684bSTrond Myklebust 		return err;
40124c5684bSTrond Myklebust 	if (err > 0)
40224c5684bSTrond Myklebust 		sent += err;
40324c5684bSTrond Myklebust 	return sent;
404b4b5cc85SChuck Lever }
405b4b5cc85SChuck Lever 
4069903cd1cSChuck Lever /**
4079903cd1cSChuck Lever  * xs_sendpages - write pages directly to a socket
4089903cd1cSChuck Lever  * @sock: socket to send on
4099903cd1cSChuck Lever  * @addr: UDP only -- address of destination
4109903cd1cSChuck Lever  * @addrlen: UDP only -- length of destination address
4119903cd1cSChuck Lever  * @xdr: buffer containing this request
4129903cd1cSChuck Lever  * @base: starting position in the buffer
4139903cd1cSChuck Lever  *
414a246b010SChuck Lever  */
41524c5684bSTrond Myklebust static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
416a246b010SChuck Lever {
41724c5684bSTrond Myklebust 	unsigned int remainder = xdr->len - base;
41824c5684bSTrond Myklebust 	int err, sent = 0;
419a246b010SChuck Lever 
420262965f5SChuck Lever 	if (unlikely(!sock))
421fba91afbSTrond Myklebust 		return -ENOTSOCK;
422262965f5SChuck Lever 
423262965f5SChuck Lever 	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
42424c5684bSTrond Myklebust 	if (base != 0) {
42524c5684bSTrond Myklebust 		addr = NULL;
42624c5684bSTrond Myklebust 		addrlen = 0;
42724c5684bSTrond Myklebust 	}
428262965f5SChuck Lever 
42924c5684bSTrond Myklebust 	if (base < xdr->head[0].iov_len || addr != NULL) {
43024c5684bSTrond Myklebust 		unsigned int len = xdr->head[0].iov_len - base;
43124c5684bSTrond Myklebust 		remainder -= len;
43224c5684bSTrond Myklebust 		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
43324c5684bSTrond Myklebust 		if (remainder == 0 || err != len)
434a246b010SChuck Lever 			goto out;
43524c5684bSTrond Myklebust 		sent += err;
436a246b010SChuck Lever 		base = 0;
437a246b010SChuck Lever 	} else
43824c5684bSTrond Myklebust 		base -= xdr->head[0].iov_len;
439a246b010SChuck Lever 
44024c5684bSTrond Myklebust 	if (base < xdr->page_len) {
44124c5684bSTrond Myklebust 		unsigned int len = xdr->page_len - base;
44224c5684bSTrond Myklebust 		remainder -= len;
44324c5684bSTrond Myklebust 		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
44424c5684bSTrond Myklebust 		if (remainder == 0 || err != len)
445a246b010SChuck Lever 			goto out;
44624c5684bSTrond Myklebust 		sent += err;
447a246b010SChuck Lever 		base = 0;
44824c5684bSTrond Myklebust 	} else
44924c5684bSTrond Myklebust 		base -= xdr->page_len;
45024c5684bSTrond Myklebust 
45124c5684bSTrond Myklebust 	if (base >= xdr->tail[0].iov_len)
45224c5684bSTrond Myklebust 		return sent;
45324c5684bSTrond Myklebust 	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
454a246b010SChuck Lever out:
45524c5684bSTrond Myklebust 	if (sent == 0)
45624c5684bSTrond Myklebust 		return err;
45724c5684bSTrond Myklebust 	if (err > 0)
45824c5684bSTrond Myklebust 		sent += err;
45924c5684bSTrond Myklebust 	return sent;
460a246b010SChuck Lever }
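/*
 * Illustrative call pattern for xs_sendpages() (the senders below do this
 * for real): a UDP sender passes the destination address and starts at
 * offset 0, while a TCP sender passes NULL/0 and resumes from
 * req->rq_bytes_sent, e.g.
 *
 *	status = xs_sendpages(transport->sock, NULL, 0,
 *			      &req->rq_snd_buf, req->rq_bytes_sent);
 */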
461a246b010SChuck Lever 
462b6ddf64fSTrond Myklebust static void xs_nospace_callback(struct rpc_task *task)
463b6ddf64fSTrond Myklebust {
464b6ddf64fSTrond Myklebust 	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
465b6ddf64fSTrond Myklebust 
466b6ddf64fSTrond Myklebust 	transport->inet->sk_write_pending--;
467b6ddf64fSTrond Myklebust 	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
468b6ddf64fSTrond Myklebust }
469b6ddf64fSTrond Myklebust 
4709903cd1cSChuck Lever /**
471262965f5SChuck Lever  * xs_nospace - place task on wait queue if transmit was incomplete
472262965f5SChuck Lever  * @task: task to put to sleep
4739903cd1cSChuck Lever  *
474a246b010SChuck Lever  */
4755e3771ceSTrond Myklebust static int xs_nospace(struct rpc_task *task)
476a246b010SChuck Lever {
477262965f5SChuck Lever 	struct rpc_rqst *req = task->tk_rqstp;
478262965f5SChuck Lever 	struct rpc_xprt *xprt = req->rq_xprt;
479ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
4805e3771ceSTrond Myklebust 	int ret = 0;
481a246b010SChuck Lever 
48246121cf7SChuck Lever 	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
483262965f5SChuck Lever 			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
484262965f5SChuck Lever 			req->rq_slen);
485a246b010SChuck Lever 
486262965f5SChuck Lever 	/* Protect against races with write_space */
487262965f5SChuck Lever 	spin_lock_bh(&xprt->transport_lock);
488a246b010SChuck Lever 
489262965f5SChuck Lever 	/* Don't race with disconnect */
490b6ddf64fSTrond Myklebust 	if (xprt_connected(xprt)) {
491b6ddf64fSTrond Myklebust 		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
4925e3771ceSTrond Myklebust 			ret = -EAGAIN;
493b6ddf64fSTrond Myklebust 			/*
494b6ddf64fSTrond Myklebust 			 * Notify TCP that we're limited by the application
495b6ddf64fSTrond Myklebust 			 * window size
496b6ddf64fSTrond Myklebust 			 */
497b6ddf64fSTrond Myklebust 			set_bit(SOCK_NOSPACE, &transport->sock->flags);
498b6ddf64fSTrond Myklebust 			transport->inet->sk_write_pending++;
499b6ddf64fSTrond Myklebust 			/* ...and wait for more buffer space */
500b6ddf64fSTrond Myklebust 			xprt_wait_for_buffer_space(task, xs_nospace_callback);
501b6ddf64fSTrond Myklebust 		}
502b6ddf64fSTrond Myklebust 	} else {
503b6ddf64fSTrond Myklebust 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
5045e3771ceSTrond Myklebust 		ret = -ENOTCONN;
505b6ddf64fSTrond Myklebust 	}
506a246b010SChuck Lever 
507262965f5SChuck Lever 	spin_unlock_bh(&xprt->transport_lock);
5085e3771ceSTrond Myklebust 	return ret;
509a246b010SChuck Lever }
510a246b010SChuck Lever 
5119903cd1cSChuck Lever /**
512262965f5SChuck Lever  * xs_udp_send_request - write an RPC request to a UDP socket
5139903cd1cSChuck Lever  * @task: address of RPC task that manages the state of an RPC request
5149903cd1cSChuck Lever  *
5159903cd1cSChuck Lever  * Return values:
5169903cd1cSChuck Lever  *        0:	The request has been sent
5179903cd1cSChuck Lever  *   EAGAIN:	The socket was blocked, please call again later to
5189903cd1cSChuck Lever  *		complete the request
519262965f5SChuck Lever  * ENOTCONN:	Caller needs to invoke connect logic then call again
5209903cd1cSChuck Lever  *    other:	Some other error occurred, the request was not sent
5219903cd1cSChuck Lever  */
522262965f5SChuck Lever static int xs_udp_send_request(struct rpc_task *task)
523a246b010SChuck Lever {
524a246b010SChuck Lever 	struct rpc_rqst *req = task->tk_rqstp;
525a246b010SChuck Lever 	struct rpc_xprt *xprt = req->rq_xprt;
526ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
527262965f5SChuck Lever 	struct xdr_buf *xdr = &req->rq_snd_buf;
528262965f5SChuck Lever 	int status;
529262965f5SChuck Lever 
530262965f5SChuck Lever 	xs_pktdump("packet data:",
531262965f5SChuck Lever 				req->rq_svec->iov_base,
532262965f5SChuck Lever 				req->rq_svec->iov_len);
533262965f5SChuck Lever 
53401d37c42STrond Myklebust 	if (!xprt_bound(xprt))
53501d37c42STrond Myklebust 		return -ENOTCONN;
536ee0ac0c2SChuck Lever 	status = xs_sendpages(transport->sock,
53795392c59SChuck Lever 			      xs_addr(xprt),
538ee0ac0c2SChuck Lever 			      xprt->addrlen, xdr,
539ee0ac0c2SChuck Lever 			      req->rq_bytes_sent);
540262965f5SChuck Lever 
541262965f5SChuck Lever 	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
542262965f5SChuck Lever 			xdr->len - req->rq_bytes_sent, status);
543262965f5SChuck Lever 
5442199700fSTrond Myklebust 	if (status >= 0) {
5451321d8d9SChuck Lever 		task->tk_bytes_sent += status;
5462199700fSTrond Myklebust 		if (status >= req->rq_slen)
547262965f5SChuck Lever 			return 0;
548262965f5SChuck Lever 		/* Still some bytes left; set up for a retry later. */
549262965f5SChuck Lever 		status = -EAGAIN;
5502199700fSTrond Myklebust 	}
551262965f5SChuck Lever 
552262965f5SChuck Lever 	switch (status) {
553fba91afbSTrond Myklebust 	case -ENOTSOCK:
554fba91afbSTrond Myklebust 		status = -ENOTCONN;
555fba91afbSTrond Myklebust 		/* Should we call xs_close() here? */
556fba91afbSTrond Myklebust 		break;
557b6ddf64fSTrond Myklebust 	case -EAGAIN:
5585e3771ceSTrond Myklebust 		status = xs_nospace(task);
559b6ddf64fSTrond Myklebust 		break;
560c8485e4dSTrond Myklebust 	default:
561c8485e4dSTrond Myklebust 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
562c8485e4dSTrond Myklebust 			-status);
563262965f5SChuck Lever 	case -ENETUNREACH:
564262965f5SChuck Lever 	case -EPIPE:
565262965f5SChuck Lever 	case -ECONNREFUSED:
566262965f5SChuck Lever 		/* When the server has died, an ICMP port unreachable message
567262965f5SChuck Lever 		 * prompts ECONNREFUSED. */
568b6ddf64fSTrond Myklebust 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
569262965f5SChuck Lever 	}
5705fe46e9dSBian Naimeng 
571262965f5SChuck Lever 	return status;
572262965f5SChuck Lever }
573262965f5SChuck Lever 
574e06799f9STrond Myklebust /**
575e06799f9STrond Myklebust  * xs_tcp_shutdown - gracefully shut down a TCP socket
576e06799f9STrond Myklebust  * @xprt: transport
577e06799f9STrond Myklebust  *
578e06799f9STrond Myklebust  * Initiates a graceful shutdown of the TCP socket by calling the
579e06799f9STrond Myklebust  * equivalent of shutdown(SHUT_WR);
580e06799f9STrond Myklebust  */
581e06799f9STrond Myklebust static void xs_tcp_shutdown(struct rpc_xprt *xprt)
582e06799f9STrond Myklebust {
583e06799f9STrond Myklebust 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
584e06799f9STrond Myklebust 	struct socket *sock = transport->sock;
585e06799f9STrond Myklebust 
586e06799f9STrond Myklebust 	if (sock != NULL)
587e06799f9STrond Myklebust 		kernel_sock_shutdown(sock, SHUT_WR);
588e06799f9STrond Myklebust }
589e06799f9STrond Myklebust 
590808012fbSChuck Lever static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
591808012fbSChuck Lever {
592808012fbSChuck Lever 	u32 reclen = buf->len - sizeof(rpc_fraghdr);
593808012fbSChuck Lever 	rpc_fraghdr *base = buf->head[0].iov_base;
594808012fbSChuck Lever 	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
595808012fbSChuck Lever }
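/*
 * Record marking sketch (RFC 1831 framing, shown only as an illustration):
 * the 4-byte marker written above carries the "last fragment" flag in the
 * top bit and the fragment length in the low 31 bits.  For a 100-byte RPC
 * message sent as a single fragment:
 *
 *	*base = htonl(RPC_LAST_STREAM_FRAGMENT | 100);
 *	           wire bytes: 80 00 00 64
 */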
596808012fbSChuck Lever 
597262965f5SChuck Lever /**
598262965f5SChuck Lever  * xs_tcp_send_request - write an RPC request to a TCP socket
599262965f5SChuck Lever  * @task: address of RPC task that manages the state of an RPC request
600262965f5SChuck Lever  *
601262965f5SChuck Lever  * Return values:
602262965f5SChuck Lever  *        0:	The request has been sent
603262965f5SChuck Lever  *   EAGAIN:	The socket was blocked, please call again later to
604262965f5SChuck Lever  *		complete the request
605262965f5SChuck Lever  * ENOTCONN:	Caller needs to invoke connect logic then call again
606262965f5SChuck Lever  *    other:	Some other error occurred, the request was not sent
607262965f5SChuck Lever  *
608262965f5SChuck Lever  * XXX: In the case of soft timeouts, should we eventually give up
609262965f5SChuck Lever  *	if sendmsg is not able to make progress?
610262965f5SChuck Lever  */
611262965f5SChuck Lever static int xs_tcp_send_request(struct rpc_task *task)
612262965f5SChuck Lever {
613262965f5SChuck Lever 	struct rpc_rqst *req = task->tk_rqstp;
614262965f5SChuck Lever 	struct rpc_xprt *xprt = req->rq_xprt;
615ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
616262965f5SChuck Lever 	struct xdr_buf *xdr = &req->rq_snd_buf;
617b595bb15SChuck Lever 	int status;
618a246b010SChuck Lever 
619808012fbSChuck Lever 	xs_encode_tcp_record_marker(&req->rq_snd_buf);
620262965f5SChuck Lever 
621262965f5SChuck Lever 	xs_pktdump("packet data:",
622262965f5SChuck Lever 				req->rq_svec->iov_base,
623262965f5SChuck Lever 				req->rq_svec->iov_len);
624a246b010SChuck Lever 
625a246b010SChuck Lever 	/* Continue transmitting the packet/record. We must be careful
626a246b010SChuck Lever 	 * to cope with writespace callbacks arriving _after_ we have
627262965f5SChuck Lever 	 * called sendmsg(). */
628a246b010SChuck Lever 	while (1) {
629ee0ac0c2SChuck Lever 		status = xs_sendpages(transport->sock,
630ee0ac0c2SChuck Lever 					NULL, 0, xdr, req->rq_bytes_sent);
631a246b010SChuck Lever 
632262965f5SChuck Lever 		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
633262965f5SChuck Lever 				xdr->len - req->rq_bytes_sent, status);
634262965f5SChuck Lever 
635262965f5SChuck Lever 		if (unlikely(status < 0))
636a246b010SChuck Lever 			break;
637a246b010SChuck Lever 
638a246b010SChuck Lever 		/* If we've sent the entire packet, immediately
639a246b010SChuck Lever 		 * reset the count of bytes sent. */
640262965f5SChuck Lever 		req->rq_bytes_sent += status;
641ef759a2eSChuck Lever 		task->tk_bytes_sent += status;
642262965f5SChuck Lever 		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
643a246b010SChuck Lever 			req->rq_bytes_sent = 0;
644a246b010SChuck Lever 			return 0;
645a246b010SChuck Lever 		}
646262965f5SChuck Lever 
64706b4b681STrond Myklebust 		if (status != 0)
64806b4b681STrond Myklebust 			continue;
649a246b010SChuck Lever 		status = -EAGAIN;
650a246b010SChuck Lever 		break;
651a246b010SChuck Lever 	}
652a246b010SChuck Lever 
653262965f5SChuck Lever 	switch (status) {
654fba91afbSTrond Myklebust 	case -ENOTSOCK:
655fba91afbSTrond Myklebust 		status = -ENOTCONN;
656fba91afbSTrond Myklebust 		/* Should we call xs_close() here? */
657fba91afbSTrond Myklebust 		break;
658262965f5SChuck Lever 	case -EAGAIN:
6595e3771ceSTrond Myklebust 		status = xs_nospace(task);
660262965f5SChuck Lever 		break;
661262965f5SChuck Lever 	default:
662262965f5SChuck Lever 		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
663262965f5SChuck Lever 			-status);
664a246b010SChuck Lever 	case -ECONNRESET:
66555420c24STrond Myklebust 	case -EPIPE:
666e06799f9STrond Myklebust 		xs_tcp_shutdown(xprt);
667a246b010SChuck Lever 	case -ECONNREFUSED:
668a246b010SChuck Lever 	case -ENOTCONN:
669a246b010SChuck Lever 		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
670a246b010SChuck Lever 	}
6715fe46e9dSBian Naimeng 
672a246b010SChuck Lever 	return status;
673a246b010SChuck Lever }
674a246b010SChuck Lever 
6759903cd1cSChuck Lever /**
676e0ab53deSTrond Myklebust  * xs_tcp_release_xprt - clean up after a tcp transmission
677e0ab53deSTrond Myklebust  * @xprt: transport
678e0ab53deSTrond Myklebust  * @task: rpc task
679e0ab53deSTrond Myklebust  *
680e0ab53deSTrond Myklebust  * This cleans up if an error causes us to abort the transmission of a request.
681e0ab53deSTrond Myklebust  * In this case, the socket may need to be reset in order to avoid confusing
682e0ab53deSTrond Myklebust  * the server.
683e0ab53deSTrond Myklebust  */
684e0ab53deSTrond Myklebust static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
685e0ab53deSTrond Myklebust {
686e0ab53deSTrond Myklebust 	struct rpc_rqst *req;
687e0ab53deSTrond Myklebust 
688e0ab53deSTrond Myklebust 	if (task != xprt->snd_task)
689e0ab53deSTrond Myklebust 		return;
690e0ab53deSTrond Myklebust 	if (task == NULL)
691e0ab53deSTrond Myklebust 		goto out_release;
692e0ab53deSTrond Myklebust 	req = task->tk_rqstp;
693e0ab53deSTrond Myklebust 	if (req->rq_bytes_sent == 0)
694e0ab53deSTrond Myklebust 		goto out_release;
695e0ab53deSTrond Myklebust 	if (req->rq_bytes_sent == req->rq_snd_buf.len)
696e0ab53deSTrond Myklebust 		goto out_release;
697e0ab53deSTrond Myklebust 	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
698e0ab53deSTrond Myklebust out_release:
699e0ab53deSTrond Myklebust 	xprt_release_xprt(xprt, task);
700e0ab53deSTrond Myklebust }
701e0ab53deSTrond Myklebust 
7022a9e1cfaSTrond Myklebust static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
7032a9e1cfaSTrond Myklebust {
7042a9e1cfaSTrond Myklebust 	transport->old_data_ready = sk->sk_data_ready;
7052a9e1cfaSTrond Myklebust 	transport->old_state_change = sk->sk_state_change;
7062a9e1cfaSTrond Myklebust 	transport->old_write_space = sk->sk_write_space;
7072a9e1cfaSTrond Myklebust 	transport->old_error_report = sk->sk_error_report;
7082a9e1cfaSTrond Myklebust }
7092a9e1cfaSTrond Myklebust 
7102a9e1cfaSTrond Myklebust static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
7112a9e1cfaSTrond Myklebust {
7122a9e1cfaSTrond Myklebust 	sk->sk_data_ready = transport->old_data_ready;
7132a9e1cfaSTrond Myklebust 	sk->sk_state_change = transport->old_state_change;
7142a9e1cfaSTrond Myklebust 	sk->sk_write_space = transport->old_write_space;
7152a9e1cfaSTrond Myklebust 	sk->sk_error_report = transport->old_error_report;
7162a9e1cfaSTrond Myklebust }
7172a9e1cfaSTrond Myklebust 
718fe315e76SChuck Lever static void xs_reset_transport(struct sock_xprt *transport)
719a246b010SChuck Lever {
720ee0ac0c2SChuck Lever 	struct socket *sock = transport->sock;
721ee0ac0c2SChuck Lever 	struct sock *sk = transport->inet;
722a246b010SChuck Lever 
723fe315e76SChuck Lever 	if (sk == NULL)
724fe315e76SChuck Lever 		return;
7259903cd1cSChuck Lever 
726a246b010SChuck Lever 	write_lock_bh(&sk->sk_callback_lock);
727ee0ac0c2SChuck Lever 	transport->inet = NULL;
728ee0ac0c2SChuck Lever 	transport->sock = NULL;
729a246b010SChuck Lever 
730a246b010SChuck Lever 	sk->sk_user_data = NULL;
7312a9e1cfaSTrond Myklebust 
7322a9e1cfaSTrond Myklebust 	xs_restore_old_callbacks(transport, sk);
733a246b010SChuck Lever 	write_unlock_bh(&sk->sk_callback_lock);
734a246b010SChuck Lever 
735a246b010SChuck Lever 	sk->sk_no_check = 0;
736a246b010SChuck Lever 
737a246b010SChuck Lever 	sock_release(sock);
738fe315e76SChuck Lever }
739fe315e76SChuck Lever 
740fe315e76SChuck Lever /**
741fe315e76SChuck Lever  * xs_close - close a socket
742fe315e76SChuck Lever  * @xprt: transport
743fe315e76SChuck Lever  *
744fe315e76SChuck Lever  * This is used when all requests are complete; ie, no DRC state remains
745fe315e76SChuck Lever  * on the server we want to save.
746f75e6745STrond Myklebust  *
747f75e6745STrond Myklebust  * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
748f75e6745STrond Myklebust  * xs_reset_transport() zeroing the socket from underneath a writer.
749fe315e76SChuck Lever  */
750fe315e76SChuck Lever static void xs_close(struct rpc_xprt *xprt)
751fe315e76SChuck Lever {
752fe315e76SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
753fe315e76SChuck Lever 
754fe315e76SChuck Lever 	dprintk("RPC:       xs_close xprt %p\n", xprt);
755fe315e76SChuck Lever 
756fe315e76SChuck Lever 	xs_reset_transport(transport);
75761d0a8e6SNeil Brown 	xprt->reestablish_timeout = 0;
758fe315e76SChuck Lever 
759632e3bdcSTrond Myklebust 	smp_mb__before_clear_bit();
7607d1e8255STrond Myklebust 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
761632e3bdcSTrond Myklebust 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
7623b948ae5STrond Myklebust 	clear_bit(XPRT_CLOSING, &xprt->state);
763632e3bdcSTrond Myklebust 	smp_mb__after_clear_bit();
76462da3b24STrond Myklebust 	xprt_disconnect_done(xprt);
765a246b010SChuck Lever }
766a246b010SChuck Lever 
767f75e6745STrond Myklebust static void xs_tcp_close(struct rpc_xprt *xprt)
768f75e6745STrond Myklebust {
769f75e6745STrond Myklebust 	if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
770f75e6745STrond Myklebust 		xs_close(xprt);
771f75e6745STrond Myklebust 	else
772f75e6745STrond Myklebust 		xs_tcp_shutdown(xprt);
773f75e6745STrond Myklebust }
774f75e6745STrond Myklebust 
7759903cd1cSChuck Lever /**
7769903cd1cSChuck Lever  * xs_destroy - prepare to shutdown a transport
7779903cd1cSChuck Lever  * @xprt: doomed transport
7789903cd1cSChuck Lever  *
7799903cd1cSChuck Lever  */
7809903cd1cSChuck Lever static void xs_destroy(struct rpc_xprt *xprt)
781a246b010SChuck Lever {
782c8475461SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
783c8475461SChuck Lever 
7849903cd1cSChuck Lever 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
7859903cd1cSChuck Lever 
786c1384c9cSTrond Myklebust 	cancel_rearming_delayed_work(&transport->connect_worker);
787a246b010SChuck Lever 
7889903cd1cSChuck Lever 	xs_close(xprt);
789edb267a6SChuck Lever 	xs_free_peer_addresses(xprt);
790a246b010SChuck Lever 	kfree(xprt->slot);
791c8541ecdSChuck Lever 	kfree(xprt);
792bc25571eS\"Talpey, Thomas\ 	module_put(THIS_MODULE);
793a246b010SChuck Lever }
794a246b010SChuck Lever 
7959903cd1cSChuck Lever static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
7969903cd1cSChuck Lever {
7979903cd1cSChuck Lever 	return (struct rpc_xprt *) sk->sk_user_data;
7989903cd1cSChuck Lever }
7999903cd1cSChuck Lever 
8009903cd1cSChuck Lever /**
8019903cd1cSChuck Lever  * xs_udp_data_ready - "data ready" callback for UDP sockets
8029903cd1cSChuck Lever  * @sk: socket with data to read
8039903cd1cSChuck Lever  * @len: how much data to read
8049903cd1cSChuck Lever  *
805a246b010SChuck Lever  */
8069903cd1cSChuck Lever static void xs_udp_data_ready(struct sock *sk, int len)
807a246b010SChuck Lever {
808a246b010SChuck Lever 	struct rpc_task *task;
809a246b010SChuck Lever 	struct rpc_xprt *xprt;
810a246b010SChuck Lever 	struct rpc_rqst *rovr;
811a246b010SChuck Lever 	struct sk_buff *skb;
812a246b010SChuck Lever 	int err, repsize, copied;
813d8ed029dSAlexey Dobriyan 	u32 _xid;
814d8ed029dSAlexey Dobriyan 	__be32 *xp;
815a246b010SChuck Lever 
816a246b010SChuck Lever 	read_lock(&sk->sk_callback_lock);
8179903cd1cSChuck Lever 	dprintk("RPC:       xs_udp_data_ready...\n");
8189903cd1cSChuck Lever 	if (!(xprt = xprt_from_sock(sk)))
819a246b010SChuck Lever 		goto out;
820a246b010SChuck Lever 
821a246b010SChuck Lever 	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
822a246b010SChuck Lever 		goto out;
823a246b010SChuck Lever 
824a246b010SChuck Lever 	if (xprt->shutdown)
825a246b010SChuck Lever 		goto dropit;
826a246b010SChuck Lever 
827a246b010SChuck Lever 	repsize = skb->len - sizeof(struct udphdr);
828a246b010SChuck Lever 	if (repsize < 4) {
8299903cd1cSChuck Lever 		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
830a246b010SChuck Lever 		goto dropit;
831a246b010SChuck Lever 	}
832a246b010SChuck Lever 
833a246b010SChuck Lever 	/* Copy the XID from the skb... */
834a246b010SChuck Lever 	xp = skb_header_pointer(skb, sizeof(struct udphdr),
835a246b010SChuck Lever 				sizeof(_xid), &_xid);
836a246b010SChuck Lever 	if (xp == NULL)
837a246b010SChuck Lever 		goto dropit;
838a246b010SChuck Lever 
839a246b010SChuck Lever 	/* Look up and lock the request corresponding to the given XID */
8404a0f8c04SChuck Lever 	spin_lock(&xprt->transport_lock);
841a246b010SChuck Lever 	rovr = xprt_lookup_rqst(xprt, *xp);
842a246b010SChuck Lever 	if (!rovr)
843a246b010SChuck Lever 		goto out_unlock;
844a246b010SChuck Lever 	task = rovr->rq_task;
845a246b010SChuck Lever 
846a246b010SChuck Lever 	if ((copied = rovr->rq_private_buf.buflen) > repsize)
847a246b010SChuck Lever 		copied = repsize;
848a246b010SChuck Lever 
849a246b010SChuck Lever 	/* Suck it into the iovec, verify checksum if not done by hw. */
8501781f7f5SHerbert Xu 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
8511781f7f5SHerbert Xu 		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
852a246b010SChuck Lever 		goto out_unlock;
8531781f7f5SHerbert Xu 	}
8541781f7f5SHerbert Xu 
8551781f7f5SHerbert Xu 	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
856a246b010SChuck Lever 
857a246b010SChuck Lever 	/* Something worked... */
858adf30907SEric Dumazet 	dst_confirm(skb_dst(skb));
859a246b010SChuck Lever 
8601570c1e4SChuck Lever 	xprt_adjust_cwnd(task, copied);
8611570c1e4SChuck Lever 	xprt_update_rtt(task);
8621570c1e4SChuck Lever 	xprt_complete_rqst(task, copied);
863a246b010SChuck Lever 
864a246b010SChuck Lever  out_unlock:
8654a0f8c04SChuck Lever 	spin_unlock(&xprt->transport_lock);
866a246b010SChuck Lever  dropit:
867a246b010SChuck Lever 	skb_free_datagram(sk, skb);
868a246b010SChuck Lever  out:
869a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
870a246b010SChuck Lever }
871a246b010SChuck Lever 
872dd456471SChuck Lever static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
873a246b010SChuck Lever {
87451971139SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
875a246b010SChuck Lever 	size_t len, used;
876a246b010SChuck Lever 	char *p;
877a246b010SChuck Lever 
87851971139SChuck Lever 	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
87951971139SChuck Lever 	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
8809d292316SChuck Lever 	used = xdr_skb_read_bits(desc, p, len);
88151971139SChuck Lever 	transport->tcp_offset += used;
882a246b010SChuck Lever 	if (used != len)
883a246b010SChuck Lever 		return;
884808012fbSChuck Lever 
88551971139SChuck Lever 	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
88651971139SChuck Lever 	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
887e136d092SChuck Lever 		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
888a246b010SChuck Lever 	else
889e136d092SChuck Lever 		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
89051971139SChuck Lever 	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
891808012fbSChuck Lever 
892e136d092SChuck Lever 	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
89351971139SChuck Lever 	transport->tcp_offset = 0;
894808012fbSChuck Lever 
895a246b010SChuck Lever 	/* Sanity check of the record length */
89618dca02aSRicardo Labiaga 	if (unlikely(transport->tcp_reclen < 8)) {
8979903cd1cSChuck Lever 		dprintk("RPC:       invalid TCP record fragment length\n");
8983ebb067dSTrond Myklebust 		xprt_force_disconnect(xprt);
8999903cd1cSChuck Lever 		return;
900a246b010SChuck Lever 	}
901a246b010SChuck Lever 	dprintk("RPC:       reading TCP record fragment of length %d\n",
90251971139SChuck Lever 			transport->tcp_reclen);
903a246b010SChuck Lever }
904a246b010SChuck Lever 
90551971139SChuck Lever static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
906a246b010SChuck Lever {
90751971139SChuck Lever 	if (transport->tcp_offset == transport->tcp_reclen) {
908e136d092SChuck Lever 		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
90951971139SChuck Lever 		transport->tcp_offset = 0;
910e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
911e136d092SChuck Lever 			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
912e136d092SChuck Lever 			transport->tcp_flags |= TCP_RCV_COPY_XID;
91351971139SChuck Lever 			transport->tcp_copied = 0;
914a246b010SChuck Lever 		}
915a246b010SChuck Lever 	}
916a246b010SChuck Lever }
917a246b010SChuck Lever 
918dd456471SChuck Lever static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
919a246b010SChuck Lever {
920a246b010SChuck Lever 	size_t len, used;
921a246b010SChuck Lever 	char *p;
922a246b010SChuck Lever 
92351971139SChuck Lever 	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
924a246b010SChuck Lever 	dprintk("RPC:       reading XID (%Zu bytes)\n", len);
92551971139SChuck Lever 	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
9269d292316SChuck Lever 	used = xdr_skb_read_bits(desc, p, len);
92751971139SChuck Lever 	transport->tcp_offset += used;
928a246b010SChuck Lever 	if (used != len)
929a246b010SChuck Lever 		return;
930e136d092SChuck Lever 	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
931f4a2e418SRicardo Labiaga 	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
93251971139SChuck Lever 	transport->tcp_copied = 4;
93318dca02aSRicardo Labiaga 	dprintk("RPC:       reading %s XID %08x\n",
93418dca02aSRicardo Labiaga 			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
93518dca02aSRicardo Labiaga 							      : "request with",
93651971139SChuck Lever 			ntohl(transport->tcp_xid));
93751971139SChuck Lever 	xs_tcp_check_fraghdr(transport);
938a246b010SChuck Lever }
939a246b010SChuck Lever 
94018dca02aSRicardo Labiaga static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
94118dca02aSRicardo Labiaga 				       struct xdr_skb_reader *desc)
942a246b010SChuck Lever {
94318dca02aSRicardo Labiaga 	size_t len, used;
94418dca02aSRicardo Labiaga 	u32 offset;
94518dca02aSRicardo Labiaga 	__be32	calldir;
94618dca02aSRicardo Labiaga 
94718dca02aSRicardo Labiaga 	/*
94818dca02aSRicardo Labiaga 	 * We want transport->tcp_offset to be 8 at the end of this routine
94918dca02aSRicardo Labiaga 	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
95018dca02aSRicardo Labiaga 	 * When this function is called for the first time,
95118dca02aSRicardo Labiaga 	 * transport->tcp_offset is 4 (after having already read the xid).
95218dca02aSRicardo Labiaga 	 */
95318dca02aSRicardo Labiaga 	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
95418dca02aSRicardo Labiaga 	len = sizeof(calldir) - offset;
95518dca02aSRicardo Labiaga 	dprintk("RPC:       reading CALL/REPLY flag (%Zu bytes)\n", len);
95618dca02aSRicardo Labiaga 	used = xdr_skb_read_bits(desc, &calldir, len);
95718dca02aSRicardo Labiaga 	transport->tcp_offset += used;
95818dca02aSRicardo Labiaga 	if (used != len)
95918dca02aSRicardo Labiaga 		return;
960f4a2e418SRicardo Labiaga 	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
961f4a2e418SRicardo Labiaga 	transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
96218dca02aSRicardo Labiaga 	transport->tcp_flags |= TCP_RCV_COPY_DATA;
963f4a2e418SRicardo Labiaga 	/*
964f4a2e418SRicardo Labiaga 	 * We don't yet have the XDR buffer, so we will write the calldir
965f4a2e418SRicardo Labiaga 	 * out after we get the buffer from the 'struct rpc_rqst'
966f4a2e418SRicardo Labiaga 	 */
96718dca02aSRicardo Labiaga 	if (ntohl(calldir) == RPC_REPLY)
96818dca02aSRicardo Labiaga 		transport->tcp_flags |= TCP_RPC_REPLY;
96918dca02aSRicardo Labiaga 	else
97018dca02aSRicardo Labiaga 		transport->tcp_flags &= ~TCP_RPC_REPLY;
97118dca02aSRicardo Labiaga 	dprintk("RPC:       reading %s CALL/REPLY flag %08x\n",
97218dca02aSRicardo Labiaga 			(transport->tcp_flags & TCP_RPC_REPLY) ?
97318dca02aSRicardo Labiaga 				"reply for" : "request with", calldir);
97418dca02aSRicardo Labiaga 	xs_tcp_check_fraghdr(transport);
97518dca02aSRicardo Labiaga }
97618dca02aSRicardo Labiaga 
97744b98efdSRicardo Labiaga static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
97844b98efdSRicardo Labiaga 				     struct xdr_skb_reader *desc,
97944b98efdSRicardo Labiaga 				     struct rpc_rqst *req)
980a246b010SChuck Lever {
98144b98efdSRicardo Labiaga 	struct sock_xprt *transport =
98244b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
983a246b010SChuck Lever 	struct xdr_buf *rcvbuf;
984a246b010SChuck Lever 	size_t len;
985a246b010SChuck Lever 	ssize_t r;
986a246b010SChuck Lever 
987a246b010SChuck Lever 	rcvbuf = &req->rq_private_buf;
988f4a2e418SRicardo Labiaga 
989f4a2e418SRicardo Labiaga 	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
990f4a2e418SRicardo Labiaga 		/*
991f4a2e418SRicardo Labiaga 		 * Save the RPC direction in the XDR buffer
992f4a2e418SRicardo Labiaga 		 */
993f4a2e418SRicardo Labiaga 		__be32	calldir = transport->tcp_flags & TCP_RPC_REPLY ?
994f4a2e418SRicardo Labiaga 					htonl(RPC_REPLY) : 0;
995f4a2e418SRicardo Labiaga 
996f4a2e418SRicardo Labiaga 		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
997f4a2e418SRicardo Labiaga 			&calldir, sizeof(calldir));
998f4a2e418SRicardo Labiaga 		transport->tcp_copied += sizeof(calldir);
999f4a2e418SRicardo Labiaga 		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1000a246b010SChuck Lever 	}
1001a246b010SChuck Lever 
1002a246b010SChuck Lever 	len = desc->count;
100351971139SChuck Lever 	if (len > transport->tcp_reclen - transport->tcp_offset) {
1004dd456471SChuck Lever 		struct xdr_skb_reader my_desc;
1005a246b010SChuck Lever 
100651971139SChuck Lever 		len = transport->tcp_reclen - transport->tcp_offset;
1007a246b010SChuck Lever 		memcpy(&my_desc, desc, sizeof(my_desc));
1008a246b010SChuck Lever 		my_desc.count = len;
100951971139SChuck Lever 		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
10109d292316SChuck Lever 					  &my_desc, xdr_skb_read_bits);
1011a246b010SChuck Lever 		desc->count -= r;
1012a246b010SChuck Lever 		desc->offset += r;
1013a246b010SChuck Lever 	} else
101451971139SChuck Lever 		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
10159d292316SChuck Lever 					  desc, xdr_skb_read_bits);
1016a246b010SChuck Lever 
1017a246b010SChuck Lever 	if (r > 0) {
101851971139SChuck Lever 		transport->tcp_copied += r;
101951971139SChuck Lever 		transport->tcp_offset += r;
1020a246b010SChuck Lever 	}
1021a246b010SChuck Lever 	if (r != len) {
1022a246b010SChuck Lever 		/* Error when copying to the receive buffer,
1023a246b010SChuck Lever 		 * usually because we weren't able to allocate
1024a246b010SChuck Lever 		 * additional buffer pages. All we can do now
1025e136d092SChuck Lever 		 * is turn off TCP_RCV_COPY_DATA, so the request
1026a246b010SChuck Lever 		 * will not receive any additional updates,
1027a246b010SChuck Lever 		 * and time out.
1028a246b010SChuck Lever 		 * Any remaining data from this record will
1029a246b010SChuck Lever 		 * be discarded.
1030a246b010SChuck Lever 		 */
1031e136d092SChuck Lever 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1032a246b010SChuck Lever 		dprintk("RPC:       XID %08x truncated request\n",
103351971139SChuck Lever 				ntohl(transport->tcp_xid));
103446121cf7SChuck Lever 		dprintk("RPC:       xprt = %p, tcp_copied = %lu, "
103546121cf7SChuck Lever 				"tcp_offset = %u, tcp_reclen = %u\n",
103646121cf7SChuck Lever 				xprt, transport->tcp_copied,
103746121cf7SChuck Lever 				transport->tcp_offset, transport->tcp_reclen);
103844b98efdSRicardo Labiaga 		return;
1039a246b010SChuck Lever 	}
1040a246b010SChuck Lever 
1041a246b010SChuck Lever 	dprintk("RPC:       XID %08x read %Zd bytes\n",
104251971139SChuck Lever 			ntohl(transport->tcp_xid), r);
104346121cf7SChuck Lever 	dprintk("RPC:       xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
104446121cf7SChuck Lever 			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
104546121cf7SChuck Lever 			transport->tcp_offset, transport->tcp_reclen);
1046a246b010SChuck Lever 
104751971139SChuck Lever 	if (transport->tcp_copied == req->rq_private_buf.buflen)
1048e136d092SChuck Lever 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
104951971139SChuck Lever 	else if (transport->tcp_offset == transport->tcp_reclen) {
1050e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1051e136d092SChuck Lever 			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1052a246b010SChuck Lever 	}
1053a246b010SChuck Lever 
105444b98efdSRicardo Labiaga 	return;
105544b98efdSRicardo Labiaga }
105644b98efdSRicardo Labiaga 
105744b98efdSRicardo Labiaga /*
105844b98efdSRicardo Labiaga  * Finds the request corresponding to the RPC xid and invokes the common
105944b98efdSRicardo Labiaga  * tcp read code to read the data.
106044b98efdSRicardo Labiaga  */
106144b98efdSRicardo Labiaga static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
106244b98efdSRicardo Labiaga 				    struct xdr_skb_reader *desc)
106344b98efdSRicardo Labiaga {
106444b98efdSRicardo Labiaga 	struct sock_xprt *transport =
106544b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
106644b98efdSRicardo Labiaga 	struct rpc_rqst *req;
106744b98efdSRicardo Labiaga 
106844b98efdSRicardo Labiaga 	dprintk("RPC:       read reply XID %08x\n", ntohl(transport->tcp_xid));
106944b98efdSRicardo Labiaga 
107044b98efdSRicardo Labiaga 	/* Find and lock the request corresponding to this xid */
107144b98efdSRicardo Labiaga 	spin_lock(&xprt->transport_lock);
107244b98efdSRicardo Labiaga 	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
107344b98efdSRicardo Labiaga 	if (!req) {
107444b98efdSRicardo Labiaga 		dprintk("RPC:       XID %08x request not found!\n",
107544b98efdSRicardo Labiaga 				ntohl(transport->tcp_xid));
107644b98efdSRicardo Labiaga 		spin_unlock(&xprt->transport_lock);
107744b98efdSRicardo Labiaga 		return -1;
107844b98efdSRicardo Labiaga 	}
107944b98efdSRicardo Labiaga 
108044b98efdSRicardo Labiaga 	xs_tcp_read_common(xprt, desc, req);
108144b98efdSRicardo Labiaga 
1082e136d092SChuck Lever 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
108351971139SChuck Lever 		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
108444b98efdSRicardo Labiaga 
10854a0f8c04SChuck Lever 	spin_unlock(&xprt->transport_lock);
108644b98efdSRicardo Labiaga 	return 0;
108744b98efdSRicardo Labiaga }
108844b98efdSRicardo Labiaga 
108944b98efdSRicardo Labiaga #if defined(CONFIG_NFS_V4_1)
109044b98efdSRicardo Labiaga /*
109144b98efdSRicardo Labiaga  * Obtains an rpc_rqst previously allocated and invokes the common
109244b98efdSRicardo Labiaga  * tcp read code to read the data.  The result is placed in the callback
109344b98efdSRicardo Labiaga  * queue.
109444b98efdSRicardo Labiaga  * If we're unable to obtain the rpc_rqst we schedule the closing of the
109544b98efdSRicardo Labiaga  * connection and return -1.
109644b98efdSRicardo Labiaga  */
109744b98efdSRicardo Labiaga static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
109844b98efdSRicardo Labiaga 				       struct xdr_skb_reader *desc)
109944b98efdSRicardo Labiaga {
110044b98efdSRicardo Labiaga 	struct sock_xprt *transport =
110144b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
110244b98efdSRicardo Labiaga 	struct rpc_rqst *req;
110344b98efdSRicardo Labiaga 
110444b98efdSRicardo Labiaga 	req = xprt_alloc_bc_request(xprt);
110544b98efdSRicardo Labiaga 	if (req == NULL) {
110644b98efdSRicardo Labiaga 		printk(KERN_WARNING "Callback slot table overflowed\n");
110744b98efdSRicardo Labiaga 		xprt_force_disconnect(xprt);
110844b98efdSRicardo Labiaga 		return -1;
110944b98efdSRicardo Labiaga 	}
111044b98efdSRicardo Labiaga 
111144b98efdSRicardo Labiaga 	req->rq_xid = transport->tcp_xid;
111244b98efdSRicardo Labiaga 	dprintk("RPC:       read callback  XID %08x\n", ntohl(req->rq_xid));
111344b98efdSRicardo Labiaga 	xs_tcp_read_common(xprt, desc, req);
111444b98efdSRicardo Labiaga 
111544b98efdSRicardo Labiaga 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
111644b98efdSRicardo Labiaga 		struct svc_serv *bc_serv = xprt->bc_serv;
111744b98efdSRicardo Labiaga 
111844b98efdSRicardo Labiaga 		/*
111944b98efdSRicardo Labiaga 		 * Add callback request to callback list.  The callback
112044b98efdSRicardo Labiaga 		 * service sleeps on the sv_cb_waitq waiting for new
112144b98efdSRicardo Labiaga 		 * requests.  Wake it up after enqueuing the
112244b98efdSRicardo Labiaga 		 * request.
112344b98efdSRicardo Labiaga 		 */
112444b98efdSRicardo Labiaga 		dprintk("RPC:       add callback request to list\n");
112544b98efdSRicardo Labiaga 		spin_lock(&bc_serv->sv_cb_lock);
112644b98efdSRicardo Labiaga 		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
112744b98efdSRicardo Labiaga 		spin_unlock(&bc_serv->sv_cb_lock);
112844b98efdSRicardo Labiaga 		wake_up(&bc_serv->sv_cb_waitq);
112944b98efdSRicardo Labiaga 	}
113044b98efdSRicardo Labiaga 
113144b98efdSRicardo Labiaga 	req->rq_private_buf.len = transport->tcp_copied;
113244b98efdSRicardo Labiaga 
113344b98efdSRicardo Labiaga 	return 0;
113444b98efdSRicardo Labiaga }
113544b98efdSRicardo Labiaga 
113644b98efdSRicardo Labiaga static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
113744b98efdSRicardo Labiaga 					struct xdr_skb_reader *desc)
113844b98efdSRicardo Labiaga {
113944b98efdSRicardo Labiaga 	struct sock_xprt *transport =
114044b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
114144b98efdSRicardo Labiaga 
114244b98efdSRicardo Labiaga 	return (transport->tcp_flags & TCP_RPC_REPLY) ?
114344b98efdSRicardo Labiaga 		xs_tcp_read_reply(xprt, desc) :
114444b98efdSRicardo Labiaga 		xs_tcp_read_callback(xprt, desc);
114544b98efdSRicardo Labiaga }
114644b98efdSRicardo Labiaga #else
114744b98efdSRicardo Labiaga static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
114844b98efdSRicardo Labiaga 					struct xdr_skb_reader *desc)
114944b98efdSRicardo Labiaga {
115044b98efdSRicardo Labiaga 	return xs_tcp_read_reply(xprt, desc);
115144b98efdSRicardo Labiaga }
115244b98efdSRicardo Labiaga #endif /* CONFIG_NFS_V4_1 */
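/*
 * Editorial note (not part of the original source): _xs_tcp_read_data()
 * dispatches on the TCP_RPC_REPLY flag that xs_tcp_read_calldir() derives
 * from the call-direction word of the record being parsed.  Roughly, the
 * on-the-wire layout consumed by the receive path looks like:
 *
 *	+---------------+--------+-----------+-----------------------+
 *	| record marker |  xid   |  calldir  |  rest of RPC message  |
 *	+---------------+--------+-----------+-----------------------+
 *
 *	calldir == RPC_CALL  (0) -> backchannel request, xs_tcp_read_callback()
 *	calldir == RPC_REPLY (1) -> ordinary reply,      xs_tcp_read_reply()
 */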
115344b98efdSRicardo Labiaga 
115444b98efdSRicardo Labiaga /*
115544b98efdSRicardo Labiaga  * Read data off the transport.  This can be either an RPC_CALL or an
115644b98efdSRicardo Labiaga  * RPC_REPLY.  Relay the processing to helper functions.
115744b98efdSRicardo Labiaga  */
115844b98efdSRicardo Labiaga static void xs_tcp_read_data(struct rpc_xprt *xprt,
115944b98efdSRicardo Labiaga 				    struct xdr_skb_reader *desc)
116044b98efdSRicardo Labiaga {
116144b98efdSRicardo Labiaga 	struct sock_xprt *transport =
116244b98efdSRicardo Labiaga 				container_of(xprt, struct sock_xprt, xprt);
116344b98efdSRicardo Labiaga 
116444b98efdSRicardo Labiaga 	if (_xs_tcp_read_data(xprt, desc) == 0)
116551971139SChuck Lever 		xs_tcp_check_fraghdr(transport);
116644b98efdSRicardo Labiaga 	else {
116744b98efdSRicardo Labiaga 		/*
116844b98efdSRicardo Labiaga 		 * The transport_lock protects the request handling.
116944b98efdSRicardo Labiaga 		 * There's no need to hold it to update the tcp_flags.
117044b98efdSRicardo Labiaga 		 */
117144b98efdSRicardo Labiaga 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
117244b98efdSRicardo Labiaga 	}
1173a246b010SChuck Lever }
1174a246b010SChuck Lever 
1175dd456471SChuck Lever static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1176a246b010SChuck Lever {
1177a246b010SChuck Lever 	size_t len;
1178a246b010SChuck Lever 
117951971139SChuck Lever 	len = transport->tcp_reclen - transport->tcp_offset;
1180a246b010SChuck Lever 	if (len > desc->count)
1181a246b010SChuck Lever 		len = desc->count;
1182a246b010SChuck Lever 	desc->count -= len;
1183a246b010SChuck Lever 	desc->offset += len;
118451971139SChuck Lever 	transport->tcp_offset += len;
1185a246b010SChuck Lever 	dprintk("RPC:       discarded %Zu bytes\n", len);
118651971139SChuck Lever 	xs_tcp_check_fraghdr(transport);
1187a246b010SChuck Lever }
1188a246b010SChuck Lever 
11899903cd1cSChuck Lever static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1190a246b010SChuck Lever {
1191a246b010SChuck Lever 	struct rpc_xprt *xprt = rd_desc->arg.data;
119251971139SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1193dd456471SChuck Lever 	struct xdr_skb_reader desc = {
1194a246b010SChuck Lever 		.skb	= skb,
1195a246b010SChuck Lever 		.offset	= offset,
1196a246b010SChuck Lever 		.count	= len,
1197a246b010SChuck Lever 	};
1198a246b010SChuck Lever 
11999903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_data_recv started\n");
1200a246b010SChuck Lever 	do {
1201a246b010SChuck Lever 		/* Read in a new fragment marker if necessary */
1202a246b010SChuck Lever 		/* Can we ever really expect to get completely empty fragments? */
1203e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
12049903cd1cSChuck Lever 			xs_tcp_read_fraghdr(xprt, &desc);
1205a246b010SChuck Lever 			continue;
1206a246b010SChuck Lever 		}
1207a246b010SChuck Lever 		/* Read in the xid if necessary */
1208e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
120951971139SChuck Lever 			xs_tcp_read_xid(transport, &desc);
1210a246b010SChuck Lever 			continue;
1211a246b010SChuck Lever 		}
121218dca02aSRicardo Labiaga 		/* Read in the call/reply flag */
1213f4a2e418SRicardo Labiaga 		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
121418dca02aSRicardo Labiaga 			xs_tcp_read_calldir(transport, &desc);
121518dca02aSRicardo Labiaga 			continue;
121618dca02aSRicardo Labiaga 		}
1217a246b010SChuck Lever 		/* Read in the request data */
1218e136d092SChuck Lever 		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
121944b98efdSRicardo Labiaga 			xs_tcp_read_data(xprt, &desc);
1220a246b010SChuck Lever 			continue;
1221a246b010SChuck Lever 		}
1222a246b010SChuck Lever 		/* Skip over any trailing bytes on short reads */
122351971139SChuck Lever 		xs_tcp_read_discard(transport, &desc);
1224a246b010SChuck Lever 	} while (desc.count);
12259903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_data_recv done\n");
1226a246b010SChuck Lever 	return len - desc.count;
1227a246b010SChuck Lever }
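/*
 * Editorial sketch (not in the original source): xs_tcp_data_recv() is a
 * small state machine driven by transport->tcp_flags.  For one well-formed
 * record the flags typically progress as:
 *
 *	TCP_RCV_COPY_FRAGHDR	-> xs_tcp_read_fraghdr(): 4-byte record marker
 *	TCP_RCV_COPY_XID	-> xs_tcp_read_xid():     transaction id
 *	TCP_RCV_READ_CALLDIR	-> xs_tcp_read_calldir(): call/reply direction
 *	TCP_RCV_COPY_DATA	-> xs_tcp_read_data():    payload into the
 *						          request's receive buffer
 *
 * Whatever remains of the record once TCP_RCV_COPY_DATA clears (for example
 * a reply larger than the preallocated receive buffer) is consumed by
 * xs_tcp_read_discard().
 */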
1228a246b010SChuck Lever 
12299903cd1cSChuck Lever /**
12309903cd1cSChuck Lever  * xs_tcp_data_ready - "data ready" callback for TCP sockets
12319903cd1cSChuck Lever  * @sk: socket with data to read
12329903cd1cSChuck Lever  * @bytes: how much data to read
12339903cd1cSChuck Lever  *
12349903cd1cSChuck Lever  */
12359903cd1cSChuck Lever static void xs_tcp_data_ready(struct sock *sk, int bytes)
1236a246b010SChuck Lever {
1237a246b010SChuck Lever 	struct rpc_xprt *xprt;
1238a246b010SChuck Lever 	read_descriptor_t rd_desc;
1239ff2d7db8STrond Myklebust 	int read;
1240a246b010SChuck Lever 
12419903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_data_ready...\n");
124246121cf7SChuck Lever 
124346121cf7SChuck Lever 	read_lock(&sk->sk_callback_lock);
12449903cd1cSChuck Lever 	if (!(xprt = xprt_from_sock(sk)))
1245a246b010SChuck Lever 		goto out;
1246a246b010SChuck Lever 	if (xprt->shutdown)
1247a246b010SChuck Lever 		goto out;
1248a246b010SChuck Lever 
124961d0a8e6SNeil Brown 	/* Any data means we had a useful conversation, so
125061d0a8e6SNeil Brown 	 * we don't need to delay the next reconnect
125161d0a8e6SNeil Brown 	 */
125261d0a8e6SNeil Brown 	if (xprt->reestablish_timeout)
125361d0a8e6SNeil Brown 		xprt->reestablish_timeout = 0;
125461d0a8e6SNeil Brown 
12559903cd1cSChuck Lever 	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1256a246b010SChuck Lever 	rd_desc.arg.data = xprt;
1257ff2d7db8STrond Myklebust 	do {
1258a246b010SChuck Lever 		rd_desc.count = 65536;
1259ff2d7db8STrond Myklebust 		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1260ff2d7db8STrond Myklebust 	} while (read > 0);
1261a246b010SChuck Lever out:
1262a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1263a246b010SChuck Lever }
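/*
 * Editorial note (not in the original source): tcp_read_sock() feeds the
 * data queued on the socket to xs_tcp_data_recv() and returns how many
 * bytes were consumed; rd_desc.count caps each pass at 64KB, and the
 * do/while loop above simply re-arms that budget until the socket has been
 * drained.
 */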
1264a246b010SChuck Lever 
12657d1e8255STrond Myklebust /*
12667d1e8255STrond Myklebust  * Do the equivalent of linger/linger2 handling for dealing with
12677d1e8255STrond Myklebust  * broken servers that don't close the socket in a timely
12687d1e8255STrond Myklebust  * fashion
12697d1e8255STrond Myklebust  */
12707d1e8255STrond Myklebust static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
12717d1e8255STrond Myklebust 		unsigned long timeout)
12727d1e8255STrond Myklebust {
12737d1e8255STrond Myklebust 	struct sock_xprt *transport;
12747d1e8255STrond Myklebust 
12757d1e8255STrond Myklebust 	if (xprt_test_and_set_connecting(xprt))
12767d1e8255STrond Myklebust 		return;
12777d1e8255STrond Myklebust 	set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
12787d1e8255STrond Myklebust 	transport = container_of(xprt, struct sock_xprt, xprt);
12797d1e8255STrond Myklebust 	queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
12807d1e8255STrond Myklebust 			   timeout);
12817d1e8255STrond Myklebust }
12827d1e8255STrond Myklebust 
12837d1e8255STrond Myklebust static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
12847d1e8255STrond Myklebust {
12857d1e8255STrond Myklebust 	struct sock_xprt *transport;
12867d1e8255STrond Myklebust 
12877d1e8255STrond Myklebust 	transport = container_of(xprt, struct sock_xprt, xprt);
12887d1e8255STrond Myklebust 
12897d1e8255STrond Myklebust 	if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
12907d1e8255STrond Myklebust 	    !cancel_delayed_work(&transport->connect_worker))
12917d1e8255STrond Myklebust 		return;
12927d1e8255STrond Myklebust 	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
12937d1e8255STrond Myklebust 	xprt_clear_connecting(xprt);
12947d1e8255STrond Myklebust }
12957d1e8255STrond Myklebust 
12967d1e8255STrond Myklebust static void xs_sock_mark_closed(struct rpc_xprt *xprt)
12977d1e8255STrond Myklebust {
12987d1e8255STrond Myklebust 	smp_mb__before_clear_bit();
12997d1e8255STrond Myklebust 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
13007d1e8255STrond Myklebust 	clear_bit(XPRT_CLOSING, &xprt->state);
13017d1e8255STrond Myklebust 	smp_mb__after_clear_bit();
13027d1e8255STrond Myklebust 	/* Mark transport as closed and wake up all pending tasks */
13037d1e8255STrond Myklebust 	xprt_disconnect_done(xprt);
13047d1e8255STrond Myklebust }
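/*
 * Editorial note (not in the original source): the smp_mb__*_clear_bit()
 * pair appears intended to order the XPRT_CLOSE_WAIT/XPRT_CLOSING clears
 * with respect to the wakeups issued from xprt_disconnect_done(), so that
 * a task woken on another CPU does not observe a stale "closing" state.
 */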
13057d1e8255STrond Myklebust 
13069903cd1cSChuck Lever /**
13079903cd1cSChuck Lever  * xs_tcp_state_change - callback to handle TCP socket state changes
13089903cd1cSChuck Lever  * @sk: socket whose state has changed
13099903cd1cSChuck Lever  *
13109903cd1cSChuck Lever  */
13119903cd1cSChuck Lever static void xs_tcp_state_change(struct sock *sk)
1312a246b010SChuck Lever {
1313a246b010SChuck Lever 	struct rpc_xprt *xprt;
1314a246b010SChuck Lever 
1315a246b010SChuck Lever 	read_lock(&sk->sk_callback_lock);
1316a246b010SChuck Lever 	if (!(xprt = xprt_from_sock(sk)))
1317a246b010SChuck Lever 		goto out;
13189903cd1cSChuck Lever 	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
1319a246b010SChuck Lever 	dprintk("RPC:       state %x conn %d dead %d zapped %d\n",
1320a246b010SChuck Lever 			sk->sk_state, xprt_connected(xprt),
1321a246b010SChuck Lever 			sock_flag(sk, SOCK_DEAD),
1322a246b010SChuck Lever 			sock_flag(sk, SOCK_ZAPPED));
1323a246b010SChuck Lever 
1324a246b010SChuck Lever 	switch (sk->sk_state) {
1325a246b010SChuck Lever 	case TCP_ESTABLISHED:
13264a0f8c04SChuck Lever 		spin_lock_bh(&xprt->transport_lock);
1327a246b010SChuck Lever 		if (!xprt_test_and_set_connected(xprt)) {
132851971139SChuck Lever 			struct sock_xprt *transport = container_of(xprt,
132951971139SChuck Lever 					struct sock_xprt, xprt);
133051971139SChuck Lever 
1331a246b010SChuck Lever 			/* Reset TCP record info */
133251971139SChuck Lever 			transport->tcp_offset = 0;
133351971139SChuck Lever 			transport->tcp_reclen = 0;
133451971139SChuck Lever 			transport->tcp_copied = 0;
1335e136d092SChuck Lever 			transport->tcp_flags =
1336e136d092SChuck Lever 				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
133751971139SChuck Lever 
13382a491991STrond Myklebust 			xprt_wake_pending_tasks(xprt, -EAGAIN);
1339a246b010SChuck Lever 		}
13404a0f8c04SChuck Lever 		spin_unlock_bh(&xprt->transport_lock);
1341a246b010SChuck Lever 		break;
13423b948ae5STrond Myklebust 	case TCP_FIN_WAIT1:
13433b948ae5STrond Myklebust 		/* The client initiated a shutdown of the socket */
13447c1d71cfSTrond Myklebust 		xprt->connect_cookie++;
1345663b8858STrond Myklebust 		xprt->reestablish_timeout = 0;
13463b948ae5STrond Myklebust 		set_bit(XPRT_CLOSING, &xprt->state);
13473b948ae5STrond Myklebust 		smp_mb__before_clear_bit();
13483b948ae5STrond Myklebust 		clear_bit(XPRT_CONNECTED, &xprt->state);
1349ef803670STrond Myklebust 		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
13503b948ae5STrond Myklebust 		smp_mb__after_clear_bit();
135125fe6142STrond Myklebust 		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1352a246b010SChuck Lever 		break;
1353632e3bdcSTrond Myklebust 	case TCP_CLOSE_WAIT:
13543b948ae5STrond Myklebust 		/* The server initiated a shutdown of the socket */
135566af1e55STrond Myklebust 		xprt_force_disconnect(xprt);
1356663b8858STrond Myklebust 	case TCP_SYN_SENT:
13577c1d71cfSTrond Myklebust 		xprt->connect_cookie++;
1358663b8858STrond Myklebust 	case TCP_CLOSING:
1359663b8858STrond Myklebust 		/*
1360663b8858STrond Myklebust 		 * If the server closed down the connection, make sure that
1361663b8858STrond Myklebust 		 * we back off before reconnecting
1362663b8858STrond Myklebust 		 */
1363663b8858STrond Myklebust 		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1364663b8858STrond Myklebust 			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
13653b948ae5STrond Myklebust 		break;
13663b948ae5STrond Myklebust 	case TCP_LAST_ACK:
1367670f9457STrond Myklebust 		set_bit(XPRT_CLOSING, &xprt->state);
136825fe6142STrond Myklebust 		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
13693b948ae5STrond Myklebust 		smp_mb__before_clear_bit();
13703b948ae5STrond Myklebust 		clear_bit(XPRT_CONNECTED, &xprt->state);
13713b948ae5STrond Myklebust 		smp_mb__after_clear_bit();
13723b948ae5STrond Myklebust 		break;
13733b948ae5STrond Myklebust 	case TCP_CLOSE:
13747d1e8255STrond Myklebust 		xs_tcp_cancel_linger_timeout(xprt);
13757d1e8255STrond Myklebust 		xs_sock_mark_closed(xprt);
1376a246b010SChuck Lever 	}
1377a246b010SChuck Lever  out:
1378a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1379a246b010SChuck Lever }
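/*
 * Editorial note (not in the original source): the TCP_CLOSE_WAIT,
 * TCP_SYN_SENT and TCP_CLOSING cases above fall through deliberately, so a
 * server-initiated shutdown forces a disconnect, bumps connect_cookie and
 * backs off reestablish_timeout in a single pass through the switch.
 */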
1380a246b010SChuck Lever 
13819903cd1cSChuck Lever /**
1382482f32e6STrond Myklebust  * xs_error_report - callback mainly for catching socket errors
13832a9e1cfaSTrond Myklebust  * @sk: socket
13842a9e1cfaSTrond Myklebust  */
1385482f32e6STrond Myklebust static void xs_error_report(struct sock *sk)
13862a9e1cfaSTrond Myklebust {
13872a9e1cfaSTrond Myklebust 	struct rpc_xprt *xprt;
13882a9e1cfaSTrond Myklebust 
13892a9e1cfaSTrond Myklebust 	read_lock(&sk->sk_callback_lock);
13902a9e1cfaSTrond Myklebust 	if (!(xprt = xprt_from_sock(sk)))
13912a9e1cfaSTrond Myklebust 		goto out;
13922a9e1cfaSTrond Myklebust 	dprintk("RPC:       %s client %p...\n"
13932a9e1cfaSTrond Myklebust 			"RPC:       error %d\n",
13942a9e1cfaSTrond Myklebust 			__func__, xprt, sk->sk_err);
1395482f32e6STrond Myklebust 	xprt_wake_pending_tasks(xprt, -EAGAIN);
13962a9e1cfaSTrond Myklebust out:
13972a9e1cfaSTrond Myklebust 	read_unlock(&sk->sk_callback_lock);
13982a9e1cfaSTrond Myklebust }
13992a9e1cfaSTrond Myklebust 
14001f0fa154SIlpo Järvinen static void xs_write_space(struct sock *sk)
14011f0fa154SIlpo Järvinen {
14021f0fa154SIlpo Järvinen 	struct socket *sock;
14031f0fa154SIlpo Järvinen 	struct rpc_xprt *xprt;
14041f0fa154SIlpo Järvinen 
14051f0fa154SIlpo Järvinen 	if (unlikely(!(sock = sk->sk_socket)))
14061f0fa154SIlpo Järvinen 		return;
14071f0fa154SIlpo Järvinen 	clear_bit(SOCK_NOSPACE, &sock->flags);
14081f0fa154SIlpo Järvinen 
14091f0fa154SIlpo Järvinen 	if (unlikely(!(xprt = xprt_from_sock(sk))))
14101f0fa154SIlpo Järvinen 		return;
14111f0fa154SIlpo Järvinen 	if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
14121f0fa154SIlpo Järvinen 		return;
14131f0fa154SIlpo Järvinen 
14141f0fa154SIlpo Järvinen 	xprt_write_space(xprt);
14151f0fa154SIlpo Järvinen }
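/*
 * Editorial note (not in the original source): xs_write_space() only wakes
 * RPC writers when SOCK_ASYNC_NOSPACE was actually set, i.e. when an
 * earlier send really did run out of socket buffer space; spurious
 * write-space callbacks are ignored.
 */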
14161f0fa154SIlpo Järvinen 
14172a9e1cfaSTrond Myklebust /**
1418c7b2cae8SChuck Lever  * xs_udp_write_space - callback invoked when socket buffer space
1419c7b2cae8SChuck Lever  *                             becomes available
14209903cd1cSChuck Lever  * @sk: socket whose state has changed
14219903cd1cSChuck Lever  *
1422a246b010SChuck Lever  * Called when more output buffer space is available for this socket.
1423a246b010SChuck Lever  * We try not to wake our writers until they can make "significant"
1424c7b2cae8SChuck Lever  * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1425a246b010SChuck Lever  * with a bunch of small requests.
1426a246b010SChuck Lever  */
1427c7b2cae8SChuck Lever static void xs_udp_write_space(struct sock *sk)
1428a246b010SChuck Lever {
1429a246b010SChuck Lever 	read_lock(&sk->sk_callback_lock);
1430c7b2cae8SChuck Lever 
1431c7b2cae8SChuck Lever 	/* from net/core/sock.c:sock_def_write_space */
14321f0fa154SIlpo Järvinen 	if (sock_writeable(sk))
14331f0fa154SIlpo Järvinen 		xs_write_space(sk);
1434c7b2cae8SChuck Lever 
1435c7b2cae8SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1436c7b2cae8SChuck Lever }
1437c7b2cae8SChuck Lever 
1438c7b2cae8SChuck Lever /**
1439c7b2cae8SChuck Lever  * xs_tcp_write_space - callback invoked when socket buffer space
1440c7b2cae8SChuck Lever  *                             becomes available
1441c7b2cae8SChuck Lever  * @sk: socket whose state has changed
1442c7b2cae8SChuck Lever  *
1443c7b2cae8SChuck Lever  * Called when more output buffer space is available for this socket.
1444c7b2cae8SChuck Lever  * We try not to wake our writers until they can make "significant"
1445c7b2cae8SChuck Lever  * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1446c7b2cae8SChuck Lever  * with a bunch of small requests.
1447c7b2cae8SChuck Lever  */
1448c7b2cae8SChuck Lever static void xs_tcp_write_space(struct sock *sk)
1449c7b2cae8SChuck Lever {
1450c7b2cae8SChuck Lever 	read_lock(&sk->sk_callback_lock);
1451c7b2cae8SChuck Lever 
1452c7b2cae8SChuck Lever 	/* from net/core/stream.c:sk_stream_write_space */
14531f0fa154SIlpo Järvinen 	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
14541f0fa154SIlpo Järvinen 		xs_write_space(sk);
1455c7b2cae8SChuck Lever 
1456a246b010SChuck Lever 	read_unlock(&sk->sk_callback_lock);
1457a246b010SChuck Lever }
1458a246b010SChuck Lever 
1459470056c2SChuck Lever static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1460a246b010SChuck Lever {
1461ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1462ee0ac0c2SChuck Lever 	struct sock *sk = transport->inet;
1463a246b010SChuck Lever 
14647c6e066eSChuck Lever 	if (transport->rcvsize) {
1465a246b010SChuck Lever 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
14667c6e066eSChuck Lever 		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1467a246b010SChuck Lever 	}
14687c6e066eSChuck Lever 	if (transport->sndsize) {
1469a246b010SChuck Lever 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
14707c6e066eSChuck Lever 		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1471a246b010SChuck Lever 		sk->sk_write_space(sk);
1472a246b010SChuck Lever 	}
1473a246b010SChuck Lever }
1474a246b010SChuck Lever 
147543118c29SChuck Lever /**
1476470056c2SChuck Lever  * xs_udp_set_buffer_size - set send and receive limits
147743118c29SChuck Lever  * @xprt: generic transport
1478470056c2SChuck Lever  * @sndsize: requested size of send buffer, in bytes
1479470056c2SChuck Lever  * @rcvsize: requested size of receive buffer, in bytes
148043118c29SChuck Lever  *
1481470056c2SChuck Lever  * Set socket send and receive buffer size limits.
148243118c29SChuck Lever  */
1483470056c2SChuck Lever static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
148443118c29SChuck Lever {
14857c6e066eSChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
14867c6e066eSChuck Lever 
14877c6e066eSChuck Lever 	transport->sndsize = 0;
1488470056c2SChuck Lever 	if (sndsize)
14897c6e066eSChuck Lever 		transport->sndsize = sndsize + 1024;
14907c6e066eSChuck Lever 	transport->rcvsize = 0;
1491470056c2SChuck Lever 	if (rcvsize)
14927c6e066eSChuck Lever 		transport->rcvsize = rcvsize + 1024;
1493470056c2SChuck Lever 
1494470056c2SChuck Lever 	xs_udp_do_set_buffer_size(xprt);
149543118c29SChuck Lever }
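/*
 * Editorial example (illustrative numbers, not from the source): with
 * xprt->max_reqs == 16, a caller asking for a 4096-byte receive buffer gets
 * transport->rcvsize = 4096 + 1024 = 5120, and xs_udp_do_set_buffer_size()
 * then pins sk_rcvbuf at 5120 * 16 * 2 = 163840 bytes under
 * SOCK_RCVBUF_LOCK.
 */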
149643118c29SChuck Lever 
149746c0ee8bSChuck Lever /**
149846c0ee8bSChuck Lever  * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
149946c0ee8bSChuck Lever  * @task: task that timed out
150046c0ee8bSChuck Lever  *
150146c0ee8bSChuck Lever  * Adjust the congestion window after a retransmit timeout has occurred.
150246c0ee8bSChuck Lever  */
150346c0ee8bSChuck Lever static void xs_udp_timer(struct rpc_task *task)
150446c0ee8bSChuck Lever {
150546c0ee8bSChuck Lever 	xprt_adjust_cwnd(task, -ETIMEDOUT);
150646c0ee8bSChuck Lever }
150746c0ee8bSChuck Lever 
1508b85d8806SChuck Lever static unsigned short xs_get_random_port(void)
1509b85d8806SChuck Lever {
1510b85d8806SChuck Lever 	unsigned short range = xprt_max_resvport - xprt_min_resvport;
1511b85d8806SChuck Lever 	unsigned short rand = (unsigned short) net_random() % range;
1512b85d8806SChuck Lever 	return rand + xprt_min_resvport;
1513b85d8806SChuck Lever }
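/*
 * Editorial example (assuming the usual defaults xprt_min_resvport == 665
 * and xprt_max_resvport == 1023): range == 358, so the helper above returns
 * 665 + (net_random() % 358), i.e. a port in [665, 1022].  Note that the
 * configured maximum itself is never produced by this function.
 */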
1514b85d8806SChuck Lever 
151592200412SChuck Lever /**
151692200412SChuck Lever  * xs_set_port - reset the port number in the remote endpoint address
151792200412SChuck Lever  * @xprt: generic transport
151892200412SChuck Lever  * @port: new port number
151992200412SChuck Lever  *
152092200412SChuck Lever  */
152192200412SChuck Lever static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
152292200412SChuck Lever {
152392200412SChuck Lever 	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
1524c4efcb1dSChuck Lever 
15259dc3b095SChuck Lever 	rpc_set_port(xs_addr(xprt), port);
15269dc3b095SChuck Lever 	xs_update_peer_port(xprt);
152792200412SChuck Lever }
152892200412SChuck Lever 
152967a391d7STrond Myklebust static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
153067a391d7STrond Myklebust {
1531fbfffbd5SChuck Lever 	unsigned short port = transport->srcport;
153267a391d7STrond Myklebust 
153367a391d7STrond Myklebust 	if (port == 0 && transport->xprt.resvport)
153467a391d7STrond Myklebust 		port = xs_get_random_port();
153567a391d7STrond Myklebust 	return port;
153667a391d7STrond Myklebust }
153767a391d7STrond Myklebust 
153867a391d7STrond Myklebust static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
153967a391d7STrond Myklebust {
1540fbfffbd5SChuck Lever 	if (transport->srcport != 0)
1541fbfffbd5SChuck Lever 		transport->srcport = 0;
154267a391d7STrond Myklebust 	if (!transport->xprt.resvport)
154367a391d7STrond Myklebust 		return 0;
154467a391d7STrond Myklebust 	if (port <= xprt_min_resvport || port > xprt_max_resvport)
154567a391d7STrond Myklebust 		return xprt_max_resvport;
154667a391d7STrond Myklebust 	return --port;
154767a391d7STrond Myklebust }
154867a391d7STrond Myklebust 
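/*
 * Editorial note (not in the original source): the bind helpers below walk
 * candidate source ports downward from the value chosen by
 * xs_get_srcport(); xs_next_srcport() wraps from the minimum back up to
 * xprt_max_resvport, and the nloop counter stops the search once the
 * candidate port has wrapped around twice without a successful bind.
 */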
15497dc753f0SChuck Lever static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
1550a246b010SChuck Lever {
1551a246b010SChuck Lever 	struct sockaddr_in myaddr = {
1552a246b010SChuck Lever 		.sin_family = AF_INET,
1553a246b010SChuck Lever 	};
1554d3bc9a1dSFrank van Maarseveen 	struct sockaddr_in *sa;
155567a391d7STrond Myklebust 	int err, nloop = 0;
155667a391d7STrond Myklebust 	unsigned short port = xs_get_srcport(transport, sock);
155767a391d7STrond Myklebust 	unsigned short last;
1558a246b010SChuck Lever 
1559fbfffbd5SChuck Lever 	sa = (struct sockaddr_in *)&transport->srcaddr;
1560d3bc9a1dSFrank van Maarseveen 	myaddr.sin_addr = sa->sin_addr;
1561a246b010SChuck Lever 	do {
1562a246b010SChuck Lever 		myaddr.sin_port = htons(port);
1563e6242e92SSridhar Samudrala 		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
1564a246b010SChuck Lever 						sizeof(myaddr));
156567a391d7STrond Myklebust 		if (port == 0)
1566d3bc9a1dSFrank van Maarseveen 			break;
1567a246b010SChuck Lever 		if (err == 0) {
1568fbfffbd5SChuck Lever 			transport->srcport = port;
1569d3bc9a1dSFrank van Maarseveen 			break;
1570a246b010SChuck Lever 		}
157167a391d7STrond Myklebust 		last = port;
157267a391d7STrond Myklebust 		port = xs_next_srcport(transport, sock, port);
157367a391d7STrond Myklebust 		if (port > last)
157467a391d7STrond Myklebust 			nloop++;
157567a391d7STrond Myklebust 	} while (err == -EADDRINUSE && nloop != 2);
157621454aaaSHarvey Harrison 	dprintk("RPC:       %s %pI4:%u: %s (%d)\n",
157721454aaaSHarvey Harrison 			__func__, &myaddr.sin_addr,
15787dc753f0SChuck Lever 			port, err ? "failed" : "ok", err);
1579a246b010SChuck Lever 	return err;
1580a246b010SChuck Lever }
1581a246b010SChuck Lever 
158290058d37SChuck Lever static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
158390058d37SChuck Lever {
158490058d37SChuck Lever 	struct sockaddr_in6 myaddr = {
158590058d37SChuck Lever 		.sin6_family = AF_INET6,
158690058d37SChuck Lever 	};
158790058d37SChuck Lever 	struct sockaddr_in6 *sa;
158867a391d7STrond Myklebust 	int err, nloop = 0;
158967a391d7STrond Myklebust 	unsigned short port = xs_get_srcport(transport, sock);
159067a391d7STrond Myklebust 	unsigned short last;
159190058d37SChuck Lever 
1592fbfffbd5SChuck Lever 	sa = (struct sockaddr_in6 *)&transport->srcaddr;
159390058d37SChuck Lever 	myaddr.sin6_addr = sa->sin6_addr;
159490058d37SChuck Lever 	do {
159590058d37SChuck Lever 		myaddr.sin6_port = htons(port);
159690058d37SChuck Lever 		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
159790058d37SChuck Lever 						sizeof(myaddr));
159867a391d7STrond Myklebust 		if (port == 0)
159990058d37SChuck Lever 			break;
160090058d37SChuck Lever 		if (err == 0) {
1601fbfffbd5SChuck Lever 			transport->srcport = port;
160290058d37SChuck Lever 			break;
160390058d37SChuck Lever 		}
160467a391d7STrond Myklebust 		last = port;
160567a391d7STrond Myklebust 		port = xs_next_srcport(transport, sock, port);
160667a391d7STrond Myklebust 		if (port > last)
160767a391d7STrond Myklebust 			nloop++;
160867a391d7STrond Myklebust 	} while (err == -EADDRINUSE && nloop != 2);
16095b095d98SHarvey Harrison 	dprintk("RPC:       xs_bind6 %pI6:%u: %s (%d)\n",
1610fdb46ee7SHarvey Harrison 		&myaddr.sin6_addr, port, err ? "failed" : "ok", err);
1611a246b010SChuck Lever 	return err;
1612a246b010SChuck Lever }
1613a246b010SChuck Lever 
1614ed07536eSPeter Zijlstra #ifdef CONFIG_DEBUG_LOCK_ALLOC
1615ed07536eSPeter Zijlstra static struct lock_class_key xs_key[2];
1616ed07536eSPeter Zijlstra static struct lock_class_key xs_slock_key[2];
1617ed07536eSPeter Zijlstra 
16188945ee5eSChuck Lever static inline void xs_reclassify_socket4(struct socket *sock)
1619ed07536eSPeter Zijlstra {
1620ed07536eSPeter Zijlstra 	struct sock *sk = sock->sk;
16218945ee5eSChuck Lever 
162202b3d346SJohn Heffner 	BUG_ON(sock_owned_by_user(sk));
16238945ee5eSChuck Lever 	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
16248945ee5eSChuck Lever 		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1625ed07536eSPeter Zijlstra }
16268945ee5eSChuck Lever 
16278945ee5eSChuck Lever static inline void xs_reclassify_socket6(struct socket *sock)
16288945ee5eSChuck Lever {
16298945ee5eSChuck Lever 	struct sock *sk = sock->sk;
16308945ee5eSChuck Lever 
1631f4921affSLinus Torvalds 	BUG_ON(sock_owned_by_user(sk));
16328945ee5eSChuck Lever 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
16338945ee5eSChuck Lever 		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1634ed07536eSPeter Zijlstra }
1635ed07536eSPeter Zijlstra #else
16368945ee5eSChuck Lever static inline void xs_reclassify_socket4(struct socket *sock)
16378945ee5eSChuck Lever {
16388945ee5eSChuck Lever }
16398945ee5eSChuck Lever 
16408945ee5eSChuck Lever static inline void xs_reclassify_socket6(struct socket *sock)
1641ed07536eSPeter Zijlstra {
1642ed07536eSPeter Zijlstra }
1643ed07536eSPeter Zijlstra #endif
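/*
 * Editorial note (not in the original source): the reclassification above
 * gives RPC-owned sockets their own lockdep keys ("slock-AF_INET-RPC",
 * "sk_lock-AF_INET-RPC", and the IPv6 equivalents) so that lock-dependency
 * tracking does not lump them together with ordinary user sockets.
 */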
1644ed07536eSPeter Zijlstra 
164516be2d20SChuck Lever static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1646a246b010SChuck Lever {
164716be2d20SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1648edb267a6SChuck Lever 
1649ee0ac0c2SChuck Lever 	if (!transport->inet) {
1650b0d93ad5SChuck Lever 		struct sock *sk = sock->sk;
1651b0d93ad5SChuck Lever 
1652b0d93ad5SChuck Lever 		write_lock_bh(&sk->sk_callback_lock);
1653b0d93ad5SChuck Lever 
16542a9e1cfaSTrond Myklebust 		xs_save_old_callbacks(transport, sk);
16552a9e1cfaSTrond Myklebust 
1656b0d93ad5SChuck Lever 		sk->sk_user_data = xprt;
1657b0d93ad5SChuck Lever 		sk->sk_data_ready = xs_udp_data_ready;
1658b0d93ad5SChuck Lever 		sk->sk_write_space = xs_udp_write_space;
1659482f32e6STrond Myklebust 		sk->sk_error_report = xs_error_report;
1660b0d93ad5SChuck Lever 		sk->sk_no_check = UDP_CSUM_NORCV;
1661b079fa7bSTrond Myklebust 		sk->sk_allocation = GFP_ATOMIC;
1662b0d93ad5SChuck Lever 
1663b0d93ad5SChuck Lever 		xprt_set_connected(xprt);
1664b0d93ad5SChuck Lever 
1665b0d93ad5SChuck Lever 		/* Reset to new socket */
1666ee0ac0c2SChuck Lever 		transport->sock = sock;
1667ee0ac0c2SChuck Lever 		transport->inet = sk;
1668b0d93ad5SChuck Lever 
1669b0d93ad5SChuck Lever 		write_unlock_bh(&sk->sk_callback_lock);
1670b0d93ad5SChuck Lever 	}
1671470056c2SChuck Lever 	xs_udp_do_set_buffer_size(xprt);
167216be2d20SChuck Lever }
167316be2d20SChuck Lever 
1674a246b010SChuck Lever /**
16759c3d72deSChuck Lever  * xs_udp_connect_worker4 - set up a UDP socket
1676a246b010SChuck Lever  * @work: RPC transport to connect
1677a246b010SChuck Lever  *
1678a246b010SChuck Lever  * Invoked by a work queue tasklet.
1679a246b010SChuck Lever  */
16809c3d72deSChuck Lever static void xs_udp_connect_worker4(struct work_struct *work)
1681a246b010SChuck Lever {
1682a246b010SChuck Lever 	struct sock_xprt *transport =
1683a246b010SChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
1684a246b010SChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
1685a246b010SChuck Lever 	struct socket *sock = transport->sock;
1686a246b010SChuck Lever 	int err, status = -EIO;
1687a246b010SChuck Lever 
168801d37c42STrond Myklebust 	if (xprt->shutdown)
16899903cd1cSChuck Lever 		goto out;
16909903cd1cSChuck Lever 
1691a246b010SChuck Lever 	/* Start by resetting any existing state */
1692fe315e76SChuck Lever 	xs_reset_transport(transport);
1693a246b010SChuck Lever 
1694fe315e76SChuck Lever 	err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
1695fe315e76SChuck Lever 	if (err < 0) {
16969903cd1cSChuck Lever 		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
1697a246b010SChuck Lever 		goto out;
1698a246b010SChuck Lever 	}
16998945ee5eSChuck Lever 	xs_reclassify_socket4(sock);
1700a246b010SChuck Lever 
17017dc753f0SChuck Lever 	if (xs_bind4(transport, sock)) {
17029903cd1cSChuck Lever 		sock_release(sock);
17039903cd1cSChuck Lever 		goto out;
1704a246b010SChuck Lever 	}
1705b0d93ad5SChuck Lever 
1706c740eff8SChuck Lever 	dprintk("RPC:       worker connecting xprt %p via %s to "
1707c740eff8SChuck Lever 				"%s (port %s)\n", xprt,
1708c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PROTO],
1709c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_ADDR],
1710c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PORT]);
1711b0d93ad5SChuck Lever 
171216be2d20SChuck Lever 	xs_udp_finish_connecting(xprt, sock);
1713a246b010SChuck Lever 	status = 0;
1714b0d93ad5SChuck Lever out:
1715b0d93ad5SChuck Lever 	xprt_clear_connecting(xprt);
17167d1e8255STrond Myklebust 	xprt_wake_pending_tasks(xprt, status);
1717b0d93ad5SChuck Lever }
1718b0d93ad5SChuck Lever 
171968e220bdSChuck Lever /**
172068e220bdSChuck Lever  * xs_udp_connect_worker6 - set up a UDP socket
172168e220bdSChuck Lever  * @work: RPC transport to connect
172268e220bdSChuck Lever  *
172368e220bdSChuck Lever  * Invoked by a work queue tasklet.
172468e220bdSChuck Lever  */
172568e220bdSChuck Lever static void xs_udp_connect_worker6(struct work_struct *work)
172668e220bdSChuck Lever {
172768e220bdSChuck Lever 	struct sock_xprt *transport =
172868e220bdSChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
172968e220bdSChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
173068e220bdSChuck Lever 	struct socket *sock = transport->sock;
173168e220bdSChuck Lever 	int err, status = -EIO;
173268e220bdSChuck Lever 
173301d37c42STrond Myklebust 	if (xprt->shutdown)
173468e220bdSChuck Lever 		goto out;
173568e220bdSChuck Lever 
173668e220bdSChuck Lever 	/* Start by resetting any existing state */
1737fe315e76SChuck Lever 	xs_reset_transport(transport);
173868e220bdSChuck Lever 
1739fe315e76SChuck Lever 	err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
1740fe315e76SChuck Lever 	if (err < 0) {
174168e220bdSChuck Lever 		dprintk("RPC:       can't create UDP transport socket (%d).\n", -err);
174268e220bdSChuck Lever 		goto out;
174368e220bdSChuck Lever 	}
17448945ee5eSChuck Lever 	xs_reclassify_socket6(sock);
174568e220bdSChuck Lever 
174668e220bdSChuck Lever 	if (xs_bind6(transport, sock) < 0) {
174768e220bdSChuck Lever 		sock_release(sock);
174868e220bdSChuck Lever 		goto out;
174968e220bdSChuck Lever 	}
175068e220bdSChuck Lever 
1751c740eff8SChuck Lever 	dprintk("RPC:       worker connecting xprt %p via %s to "
1752c740eff8SChuck Lever 				"%s (port %s)\n", xprt,
1753c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PROTO],
1754c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_ADDR],
1755c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PORT]);
175668e220bdSChuck Lever 
175768e220bdSChuck Lever 	xs_udp_finish_connecting(xprt, sock);
1758a246b010SChuck Lever 	status = 0;
1759b0d93ad5SChuck Lever out:
1760b0d93ad5SChuck Lever 	xprt_clear_connecting(xprt);
17617d1e8255STrond Myklebust 	xprt_wake_pending_tasks(xprt, status);
1762b0d93ad5SChuck Lever }
1763b0d93ad5SChuck Lever 
17643167e12cSChuck Lever /*
17653167e12cSChuck Lever  * We need to preserve the port number so the reply cache on the server can
17663167e12cSChuck Lever  * find our cached RPC replies when we get around to reconnecting.
17673167e12cSChuck Lever  */
176840d2549dSTrond Myklebust static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
17693167e12cSChuck Lever {
17703167e12cSChuck Lever 	int result;
17713167e12cSChuck Lever 	struct sockaddr any;
17723167e12cSChuck Lever 
17733167e12cSChuck Lever 	dprintk("RPC:       disconnecting xprt %p to reuse port\n", xprt);
17743167e12cSChuck Lever 
17753167e12cSChuck Lever 	/*
17763167e12cSChuck Lever 	 * Disconnect the transport socket by doing a connect operation
17773167e12cSChuck Lever 	 * with AF_UNSPEC.  This should return immediately...
17783167e12cSChuck Lever 	 */
17793167e12cSChuck Lever 	memset(&any, 0, sizeof(any));
17803167e12cSChuck Lever 	any.sa_family = AF_UNSPEC;
1781ee0ac0c2SChuck Lever 	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
17827d1e8255STrond Myklebust 	if (!result)
17837d1e8255STrond Myklebust 		xs_sock_mark_closed(xprt);
17847d1e8255STrond Myklebust 	else
17853167e12cSChuck Lever 		dprintk("RPC:       AF_UNSPEC connect return code %d\n",
17863167e12cSChuck Lever 				result);
17873167e12cSChuck Lever }
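/*
 * Editorial note (not in the original source): connecting with
 * sa_family == AF_UNSPEC is the conventional way to disassociate a socket;
 * because xs_tcp_finish_connecting() sets SOCK_BINDPORT_LOCK, the local
 * port survives the disconnect and the next connect reuses it, which is
 * what lets the server's duplicate reply cache recognize retransmitted
 * requests.
 */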
17883167e12cSChuck Lever 
178940d2549dSTrond Myklebust static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
179040d2549dSTrond Myklebust {
179140d2549dSTrond Myklebust 	unsigned int state = transport->inet->sk_state;
179240d2549dSTrond Myklebust 
179340d2549dSTrond Myklebust 	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
179440d2549dSTrond Myklebust 		return;
179540d2549dSTrond Myklebust 	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
179640d2549dSTrond Myklebust 		return;
179740d2549dSTrond Myklebust 	xs_abort_connection(xprt, transport);
179840d2549dSTrond Myklebust }
179940d2549dSTrond Myklebust 
180016be2d20SChuck Lever static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1801b0d93ad5SChuck Lever {
180216be2d20SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1803edb267a6SChuck Lever 
1804ee0ac0c2SChuck Lever 	if (!transport->inet) {
1805b0d93ad5SChuck Lever 		struct sock *sk = sock->sk;
1806b0d93ad5SChuck Lever 
1807b0d93ad5SChuck Lever 		write_lock_bh(&sk->sk_callback_lock);
1808b0d93ad5SChuck Lever 
18092a9e1cfaSTrond Myklebust 		xs_save_old_callbacks(transport, sk);
18102a9e1cfaSTrond Myklebust 
1811b0d93ad5SChuck Lever 		sk->sk_user_data = xprt;
1812b0d93ad5SChuck Lever 		sk->sk_data_ready = xs_tcp_data_ready;
1813b0d93ad5SChuck Lever 		sk->sk_state_change = xs_tcp_state_change;
1814b0d93ad5SChuck Lever 		sk->sk_write_space = xs_tcp_write_space;
1815482f32e6STrond Myklebust 		sk->sk_error_report = xs_error_report;
1816b079fa7bSTrond Myklebust 		sk->sk_allocation = GFP_ATOMIC;
18173167e12cSChuck Lever 
18183167e12cSChuck Lever 		/* socket options */
18193167e12cSChuck Lever 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
18203167e12cSChuck Lever 		sock_reset_flag(sk, SOCK_LINGER);
18213167e12cSChuck Lever 		tcp_sk(sk)->linger2 = 0;
18223167e12cSChuck Lever 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
1823b0d93ad5SChuck Lever 
1824b0d93ad5SChuck Lever 		xprt_clear_connected(xprt);
1825b0d93ad5SChuck Lever 
1826b0d93ad5SChuck Lever 		/* Reset to new socket */
1827ee0ac0c2SChuck Lever 		transport->sock = sock;
1828ee0ac0c2SChuck Lever 		transport->inet = sk;
1829b0d93ad5SChuck Lever 
1830b0d93ad5SChuck Lever 		write_unlock_bh(&sk->sk_callback_lock);
1831b0d93ad5SChuck Lever 	}
1832b0d93ad5SChuck Lever 
183301d37c42STrond Myklebust 	if (!xprt_bound(xprt))
183401d37c42STrond Myklebust 		return -ENOTCONN;
183501d37c42STrond Myklebust 
1836b0d93ad5SChuck Lever 	/* Tell the socket layer to start connecting... */
1837262ca07dSChuck Lever 	xprt->stat.connect_count++;
1838262ca07dSChuck Lever 	xprt->stat.connect_start = jiffies;
183995392c59SChuck Lever 	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
184016be2d20SChuck Lever }
184116be2d20SChuck Lever 
184216be2d20SChuck Lever /**
1843b61d59ffSTrond Myklebust  * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
1844b61d59ffSTrond Myklebust  * @xprt: RPC transport to connect
1845b61d59ffSTrond Myklebust  * @transport: socket transport to connect
1846b61d59ffSTrond Myklebust  * @create_sock: function to create a socket of the correct type
184716be2d20SChuck Lever  *
184816be2d20SChuck Lever  * Invoked by a work queue tasklet.
184916be2d20SChuck Lever  */
1850b61d59ffSTrond Myklebust static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
1851b61d59ffSTrond Myklebust 		struct sock_xprt *transport,
1852b61d59ffSTrond Myklebust 		struct socket *(*create_sock)(struct rpc_xprt *,
1853b61d59ffSTrond Myklebust 			struct sock_xprt *))
185416be2d20SChuck Lever {
185516be2d20SChuck Lever 	struct socket *sock = transport->sock;
1856b61d59ffSTrond Myklebust 	int status = -EIO;
185716be2d20SChuck Lever 
185801d37c42STrond Myklebust 	if (xprt->shutdown)
185916be2d20SChuck Lever 		goto out;
186016be2d20SChuck Lever 
186116be2d20SChuck Lever 	if (!sock) {
18627d1e8255STrond Myklebust 		clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1863b61d59ffSTrond Myklebust 		sock = create_sock(xprt, transport);
1864b61d59ffSTrond Myklebust 		if (IS_ERR(sock)) {
1865b61d59ffSTrond Myklebust 			status = PTR_ERR(sock);
186616be2d20SChuck Lever 			goto out;
186716be2d20SChuck Lever 		}
18687d1e8255STrond Myklebust 	} else {
18697d1e8255STrond Myklebust 		int abort_and_exit;
18707d1e8255STrond Myklebust 
18717d1e8255STrond Myklebust 		abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
18727d1e8255STrond Myklebust 				&xprt->state);
187316be2d20SChuck Lever 		/* "close" the socket, preserving the local port */
187440d2549dSTrond Myklebust 		xs_tcp_reuse_connection(xprt, transport);
187516be2d20SChuck Lever 
18767d1e8255STrond Myklebust 		if (abort_and_exit)
18777d1e8255STrond Myklebust 			goto out_eagain;
18787d1e8255STrond Myklebust 	}
18797d1e8255STrond Myklebust 
1880c740eff8SChuck Lever 	dprintk("RPC:       worker connecting xprt %p via %s to "
1881c740eff8SChuck Lever 				"%s (port %s)\n", xprt,
1882c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PROTO],
1883c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_ADDR],
1884c740eff8SChuck Lever 			xprt->address_strings[RPC_DISPLAY_PORT]);
188516be2d20SChuck Lever 
188616be2d20SChuck Lever 	status = xs_tcp_finish_connecting(xprt, sock);
1887a246b010SChuck Lever 	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
188846121cf7SChuck Lever 			xprt, -status, xprt_connected(xprt),
188946121cf7SChuck Lever 			sock->sk->sk_state);
1890a246b010SChuck Lever 	switch (status) {
1891f75e6745STrond Myklebust 	default:
1892f75e6745STrond Myklebust 		printk("%s: connect returned unhandled error %d\n",
1893f75e6745STrond Myklebust 			__func__, status);
1894f75e6745STrond Myklebust 	case -EADDRNOTAVAIL:
1895f75e6745STrond Myklebust 		/* We're probably in TIME_WAIT. Get rid of existing socket,
1896f75e6745STrond Myklebust 		 * and retry
1897f75e6745STrond Myklebust 		 */
1898f75e6745STrond Myklebust 		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1899f75e6745STrond Myklebust 		xprt_force_disconnect(xprt);
190088b5ed73STrond Myklebust 		break;
19018a2cec29STrond Myklebust 	case -ECONNREFUSED:
19028a2cec29STrond Myklebust 	case -ECONNRESET:
19038a2cec29STrond Myklebust 	case -ENETUNREACH:
19048a2cec29STrond Myklebust 		/* retry with existing socket, after a delay */
19052a491991STrond Myklebust 	case 0:
1906a246b010SChuck Lever 	case -EINPROGRESS:
1907a246b010SChuck Lever 	case -EALREADY:
19087d1e8255STrond Myklebust 		xprt_clear_connecting(xprt);
19097d1e8255STrond Myklebust 		return;
19109fcfe0c8STrond Myklebust 	case -EINVAL:
19119fcfe0c8STrond Myklebust 		/* Happens, for instance, if the user specified a link
19129fcfe0c8STrond Myklebust 		 * local IPv6 address without a scope-id.
19139fcfe0c8STrond Myklebust 		 */
19149fcfe0c8STrond Myklebust 		goto out;
19158a2cec29STrond Myklebust 	}
19167d1e8255STrond Myklebust out_eagain:
19172a491991STrond Myklebust 	status = -EAGAIN;
1918a246b010SChuck Lever out:
19192226feb6SChuck Lever 	xprt_clear_connecting(xprt);
19207d1e8255STrond Myklebust 	xprt_wake_pending_tasks(xprt, status);
1921a246b010SChuck Lever }
1922a246b010SChuck Lever 
1923b61d59ffSTrond Myklebust static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
1924b61d59ffSTrond Myklebust 		struct sock_xprt *transport)
1925b61d59ffSTrond Myklebust {
1926b61d59ffSTrond Myklebust 	struct socket *sock;
1927b61d59ffSTrond Myklebust 	int err;
1928b61d59ffSTrond Myklebust 
1929b61d59ffSTrond Myklebust 	/* start from scratch */
1930b61d59ffSTrond Myklebust 	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
1931b61d59ffSTrond Myklebust 	if (err < 0) {
1932b61d59ffSTrond Myklebust 		dprintk("RPC:       can't create TCP transport socket (%d).\n",
1933b61d59ffSTrond Myklebust 				-err);
1934b61d59ffSTrond Myklebust 		goto out_err;
1935b61d59ffSTrond Myklebust 	}
1936b61d59ffSTrond Myklebust 	xs_reclassify_socket4(sock);
1937b61d59ffSTrond Myklebust 
1938b61d59ffSTrond Myklebust 	if (xs_bind4(transport, sock) < 0) {
1939b61d59ffSTrond Myklebust 		sock_release(sock);
1940b61d59ffSTrond Myklebust 		goto out_err;
1941b61d59ffSTrond Myklebust 	}
1942b61d59ffSTrond Myklebust 	return sock;
1943b61d59ffSTrond Myklebust out_err:
1944b61d59ffSTrond Myklebust 	return ERR_PTR(-EIO);
1945b61d59ffSTrond Myklebust }
1946b61d59ffSTrond Myklebust 
1947b61d59ffSTrond Myklebust /**
1948a246b010SChuck Lever  * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
1949a246b010SChuck Lever  * @work: RPC transport to connect
1950a246b010SChuck Lever  *
1951a246b010SChuck Lever  * Invoked by a work queue tasklet.
1952a246b010SChuck Lever  */
1953a246b010SChuck Lever static void xs_tcp_connect_worker4(struct work_struct *work)
1954a246b010SChuck Lever {
1955a246b010SChuck Lever 	struct sock_xprt *transport =
1956a246b010SChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
1957a246b010SChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
1958a246b010SChuck Lever 
1959b61d59ffSTrond Myklebust 	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
1960b61d59ffSTrond Myklebust }
1961a246b010SChuck Lever 
1962b61d59ffSTrond Myklebust static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
1963b61d59ffSTrond Myklebust 		struct sock_xprt *transport)
1964b61d59ffSTrond Myklebust {
1965b61d59ffSTrond Myklebust 	struct socket *sock;
1966b61d59ffSTrond Myklebust 	int err;
1967b61d59ffSTrond Myklebust 
1968a246b010SChuck Lever 	/* start from scratch */
1969b61d59ffSTrond Myklebust 	err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
1970b61d59ffSTrond Myklebust 	if (err < 0) {
1971b61d59ffSTrond Myklebust 		dprintk("RPC:       can't create TCP transport socket (%d).\n",
1972b61d59ffSTrond Myklebust 				-err);
1973b61d59ffSTrond Myklebust 		goto out_err;
19749903cd1cSChuck Lever 	}
1975b61d59ffSTrond Myklebust 	xs_reclassify_socket6(sock);
19769903cd1cSChuck Lever 
1977b61d59ffSTrond Myklebust 	if (xs_bind6(transport, sock) < 0) {
1978a246b010SChuck Lever 		sock_release(sock);
1979b61d59ffSTrond Myklebust 		goto out_err;
1980a246b010SChuck Lever 	}
1981b61d59ffSTrond Myklebust 	return sock;
1982b61d59ffSTrond Myklebust out_err:
1983b61d59ffSTrond Myklebust 	return ERR_PTR(-EIO);
1984a246b010SChuck Lever }
1985a246b010SChuck Lever 
1986a246b010SChuck Lever /**
198768e220bdSChuck Lever  * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
198868e220bdSChuck Lever  * @work: RPC transport to connect
198968e220bdSChuck Lever  *
199068e220bdSChuck Lever  * Invoked by a work queue tasklet.
199168e220bdSChuck Lever  */
199268e220bdSChuck Lever static void xs_tcp_connect_worker6(struct work_struct *work)
199368e220bdSChuck Lever {
199468e220bdSChuck Lever 	struct sock_xprt *transport =
199568e220bdSChuck Lever 		container_of(work, struct sock_xprt, connect_worker.work);
199668e220bdSChuck Lever 	struct rpc_xprt *xprt = &transport->xprt;
199768e220bdSChuck Lever 
1998b61d59ffSTrond Myklebust 	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
199968e220bdSChuck Lever }
200068e220bdSChuck Lever 
200168e220bdSChuck Lever /**
2002a246b010SChuck Lever  * xs_connect - connect a socket to a remote endpoint
2003a246b010SChuck Lever  * @task: address of RPC task that manages state of connect request
2004a246b010SChuck Lever  *
2005a246b010SChuck Lever  * TCP: If the remote end dropped the connection, delay reconnecting.
200603bf4b70SChuck Lever  *
200703bf4b70SChuck Lever  * UDP socket connects are synchronous, but we use a work queue anyway
200803bf4b70SChuck Lever  * to guarantee that even unprivileged user processes can set up a
200903bf4b70SChuck Lever  * socket on a privileged port.
201003bf4b70SChuck Lever  *
201103bf4b70SChuck Lever  * If a UDP socket connect fails, the delay behavior here prevents
201203bf4b70SChuck Lever  * retry floods (hard mounts).
2013a246b010SChuck Lever  */
2014a246b010SChuck Lever static void xs_connect(struct rpc_task *task)
2015a246b010SChuck Lever {
2016a246b010SChuck Lever 	struct rpc_xprt *xprt = task->tk_xprt;
2017ee0ac0c2SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2018a246b010SChuck Lever 
201909a21c41SChuck Lever 	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
202046121cf7SChuck Lever 		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
202146121cf7SChuck Lever 				"seconds\n",
202203bf4b70SChuck Lever 				xprt, xprt->reestablish_timeout / HZ);
2023c1384c9cSTrond Myklebust 		queue_delayed_work(rpciod_workqueue,
2024c1384c9cSTrond Myklebust 				   &transport->connect_worker,
202503bf4b70SChuck Lever 				   xprt->reestablish_timeout);
202603bf4b70SChuck Lever 		xprt->reestablish_timeout <<= 1;
202761d0a8e6SNeil Brown 		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
202861d0a8e6SNeil Brown 			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
202903bf4b70SChuck Lever 		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
203003bf4b70SChuck Lever 			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
20319903cd1cSChuck Lever 	} else {
20329903cd1cSChuck Lever 		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
2033c1384c9cSTrond Myklebust 		queue_delayed_work(rpciod_workqueue,
2034c1384c9cSTrond Myklebust 				   &transport->connect_worker, 0);
2035a246b010SChuck Lever 	}
2036a246b010SChuck Lever }
2037a246b010SChuck Lever 
2038262ca07dSChuck Lever /**
2039262ca07dSChuck Lever  * xs_udp_print_stats - display UDP socket-specific stats
2040262ca07dSChuck Lever  * @xprt: rpc_xprt struct containing statistics
2041262ca07dSChuck Lever  * @seq: output file
2042262ca07dSChuck Lever  *
2043262ca07dSChuck Lever  */
2044262ca07dSChuck Lever static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2045262ca07dSChuck Lever {
2046c8475461SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2047c8475461SChuck Lever 
2048262ca07dSChuck Lever 	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
2049fbfffbd5SChuck Lever 			transport->srcport,
2050262ca07dSChuck Lever 			xprt->stat.bind_count,
2051262ca07dSChuck Lever 			xprt->stat.sends,
2052262ca07dSChuck Lever 			xprt->stat.recvs,
2053262ca07dSChuck Lever 			xprt->stat.bad_xids,
2054262ca07dSChuck Lever 			xprt->stat.req_u,
2055262ca07dSChuck Lever 			xprt->stat.bklog_u);
2056262ca07dSChuck Lever }
2057262ca07dSChuck Lever 
2058262ca07dSChuck Lever /**
2059262ca07dSChuck Lever  * xs_tcp_print_stats - display TCP socket-specific stats
2060262ca07dSChuck Lever  * @xprt: rpc_xprt struct containing statistics
2061262ca07dSChuck Lever  * @seq: output file
2062262ca07dSChuck Lever  *
2063262ca07dSChuck Lever  */
2064262ca07dSChuck Lever static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2065262ca07dSChuck Lever {
2066c8475461SChuck Lever 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2067262ca07dSChuck Lever 	long idle_time = 0;
2068262ca07dSChuck Lever 
2069262ca07dSChuck Lever 	if (xprt_connected(xprt))
2070262ca07dSChuck Lever 		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2071262ca07dSChuck Lever 
2072262ca07dSChuck Lever 	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
2073fbfffbd5SChuck Lever 			transport->srcport,
2074262ca07dSChuck Lever 			xprt->stat.bind_count,
2075262ca07dSChuck Lever 			xprt->stat.connect_count,
2076262ca07dSChuck Lever 			xprt->stat.connect_time,
2077262ca07dSChuck Lever 			idle_time,
2078262ca07dSChuck Lever 			xprt->stat.sends,
2079262ca07dSChuck Lever 			xprt->stat.recvs,
2080262ca07dSChuck Lever 			xprt->stat.bad_xids,
2081262ca07dSChuck Lever 			xprt->stat.req_u,
2082262ca07dSChuck Lever 			xprt->stat.bklog_u);
2083262ca07dSChuck Lever }
2084262ca07dSChuck Lever 
20854cfc7e60SRahul Iyer /*
20864cfc7e60SRahul Iyer  * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
20874cfc7e60SRahul Iyer  * we allocate pages instead of doing a kmalloc like rpc_malloc is because we want
20884cfc7e60SRahul Iyer  * to use the server side send routines.
20894cfc7e60SRahul Iyer  */
20905a51f13aSH Hartley Sweeten static void *bc_malloc(struct rpc_task *task, size_t size)
20914cfc7e60SRahul Iyer {
20924cfc7e60SRahul Iyer 	struct page *page;
20934cfc7e60SRahul Iyer 	struct rpc_buffer *buf;
20944cfc7e60SRahul Iyer 
20954cfc7e60SRahul Iyer 	BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
20964cfc7e60SRahul Iyer 	page = alloc_page(GFP_KERNEL);
20974cfc7e60SRahul Iyer 
20984cfc7e60SRahul Iyer 	if (!page)
20994cfc7e60SRahul Iyer 		return NULL;
21004cfc7e60SRahul Iyer 
21014cfc7e60SRahul Iyer 	buf = page_address(page);
21024cfc7e60SRahul Iyer 	buf->len = PAGE_SIZE;
21034cfc7e60SRahul Iyer 
21044cfc7e60SRahul Iyer 	return buf->data;
21054cfc7e60SRahul Iyer }
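/*
 * Editorial sketch (not in the original source) of the page returned by
 * bc_malloc(), assuming struct rpc_buffer is laid out as
 * { size_t len; char data[]; }:
 *
 *	page:  [ len | data ...................................... ]
 *	               ^-- pointer handed back to the caller
 *
 * bc_free() recovers the enclosing struct with container_of() and releases
 * the whole page; the BUG_ON() restricts callers to requests that fit in
 * the space remaining after the header.
 */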
21064cfc7e60SRahul Iyer 
21074cfc7e60SRahul Iyer /*
21084cfc7e60SRahul Iyer  * Free the space allocated in the bc_malloc routine
21094cfc7e60SRahul Iyer  */
21105a51f13aSH Hartley Sweeten static void bc_free(void *buffer)
21114cfc7e60SRahul Iyer {
21124cfc7e60SRahul Iyer 	struct rpc_buffer *buf;
21134cfc7e60SRahul Iyer 
21144cfc7e60SRahul Iyer 	if (!buffer)
21154cfc7e60SRahul Iyer 		return;
21164cfc7e60SRahul Iyer 
21174cfc7e60SRahul Iyer 	buf = container_of(buffer, struct rpc_buffer, data);
21184cfc7e60SRahul Iyer 	free_page((unsigned long)buf);
21194cfc7e60SRahul Iyer }
21204cfc7e60SRahul Iyer 
21214cfc7e60SRahul Iyer /*
21224cfc7e60SRahul Iyer  * Use the svc_sock to send the callback. Must be called with the xprt's
21234cfc7e60SRahul Iyer  * xpt_mutex held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
21244cfc7e60SRahul Iyer  */
21254cfc7e60SRahul Iyer static int bc_sendto(struct rpc_rqst *req)
21264cfc7e60SRahul Iyer {
21274cfc7e60SRahul Iyer 	int len;
21284cfc7e60SRahul Iyer 	struct xdr_buf *xbufp = &req->rq_snd_buf;
21294cfc7e60SRahul Iyer 	struct rpc_xprt *xprt = req->rq_xprt;
21304cfc7e60SRahul Iyer 	struct sock_xprt *transport =
21314cfc7e60SRahul Iyer 				container_of(xprt, struct sock_xprt, xprt);
21324cfc7e60SRahul Iyer 	struct socket *sock = transport->sock;
21334cfc7e60SRahul Iyer 	unsigned long headoff;
21344cfc7e60SRahul Iyer 	unsigned long tailoff;
21354cfc7e60SRahul Iyer 
21364cfc7e60SRahul Iyer 	/*
21374cfc7e60SRahul Iyer 	 * Set up the rpc header and record marker stuff
21384cfc7e60SRahul Iyer 	 */
21394cfc7e60SRahul Iyer 	xs_encode_tcp_record_marker(xbufp);
21404cfc7e60SRahul Iyer 
21414cfc7e60SRahul Iyer 	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
21424cfc7e60SRahul Iyer 	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
21434cfc7e60SRahul Iyer 	len = svc_send_common(sock, xbufp,
21444cfc7e60SRahul Iyer 			      virt_to_page(xbufp->head[0].iov_base), headoff,
21454cfc7e60SRahul Iyer 			      xbufp->tail[0].iov_base, tailoff);
21464cfc7e60SRahul Iyer 
21474cfc7e60SRahul Iyer 	if (len != xbufp->len) {
21484cfc7e60SRahul Iyer 		printk(KERN_NOTICE "Error sending entire callback!\n");
21494cfc7e60SRahul Iyer 		len = -EAGAIN;
21504cfc7e60SRahul Iyer 	}
21514cfc7e60SRahul Iyer 
21524cfc7e60SRahul Iyer 	return len;
21534cfc7e60SRahul Iyer }
21544cfc7e60SRahul Iyer 
21554cfc7e60SRahul Iyer /*
21564cfc7e60SRahul Iyer  * The send routine. Borrows from svc_send
21574cfc7e60SRahul Iyer  */
21584cfc7e60SRahul Iyer static int bc_send_request(struct rpc_task *task)
21594cfc7e60SRahul Iyer {
21604cfc7e60SRahul Iyer 	struct rpc_rqst *req = task->tk_rqstp;
21614cfc7e60SRahul Iyer 	struct svc_xprt	*xprt;
21624cfc7e60SRahul Iyer 	struct svc_sock         *svsk;
21634cfc7e60SRahul Iyer 	u32                     len;
21644cfc7e60SRahul Iyer 
21654cfc7e60SRahul Iyer 	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
21664cfc7e60SRahul Iyer 	/*
21674cfc7e60SRahul Iyer 	 * Get the server socket associated with this callback xprt
21684cfc7e60SRahul Iyer 	 */
21694cfc7e60SRahul Iyer 	xprt = req->rq_xprt->bc_xprt;
21704cfc7e60SRahul Iyer 	svsk = container_of(xprt, struct svc_sock, sk_xprt);
21714cfc7e60SRahul Iyer 
21724cfc7e60SRahul Iyer 	/*
21734cfc7e60SRahul Iyer 	 * Grab the mutex to serialize data as the connection is shared
21744cfc7e60SRahul Iyer 	 * with the fore channel
21754cfc7e60SRahul Iyer 	 */
21764cfc7e60SRahul Iyer 	if (!mutex_trylock(&xprt->xpt_mutex)) {
21774cfc7e60SRahul Iyer 		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
21784cfc7e60SRahul Iyer 		if (!mutex_trylock(&xprt->xpt_mutex))
21794cfc7e60SRahul Iyer 			return -EAGAIN;
21804cfc7e60SRahul Iyer 		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
21814cfc7e60SRahul Iyer 	}
21824cfc7e60SRahul Iyer 	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
21834cfc7e60SRahul Iyer 		len = -ENOTCONN;
21844cfc7e60SRahul Iyer 	else
21854cfc7e60SRahul Iyer 		len = bc_sendto(req);
21864cfc7e60SRahul Iyer 	mutex_unlock(&xprt->xpt_mutex);
21874cfc7e60SRahul Iyer 
21884cfc7e60SRahul Iyer 	if (len > 0)
21894cfc7e60SRahul Iyer 		len = 0;
21904cfc7e60SRahul Iyer 
21914cfc7e60SRahul Iyer 	return len;
21924cfc7e60SRahul Iyer }
21934cfc7e60SRahul Iyer 
21944cfc7e60SRahul Iyer /*
21954cfc7e60SRahul Iyer  * The close routine. Since the connection is client-initiated, we do nothing
21964cfc7e60SRahul Iyer  */
21974cfc7e60SRahul Iyer 
21984cfc7e60SRahul Iyer static void bc_close(struct rpc_xprt *xprt)
21994cfc7e60SRahul Iyer {
22004cfc7e60SRahul Iyer 	return;
22014cfc7e60SRahul Iyer }
22024cfc7e60SRahul Iyer 
22034cfc7e60SRahul Iyer /*
22044cfc7e60SRahul Iyer  * The xprt destroy routine. Again, because this connection is client-
22054cfc7e60SRahul Iyer  * initiated, we do nothing
22064cfc7e60SRahul Iyer  */
22074cfc7e60SRahul Iyer 
22084cfc7e60SRahul Iyer static void bc_destroy(struct rpc_xprt *xprt)
22094cfc7e60SRahul Iyer {
22104cfc7e60SRahul Iyer 	return;
22114cfc7e60SRahul Iyer }
22124cfc7e60SRahul Iyer 
2213262965f5SChuck Lever static struct rpc_xprt_ops xs_udp_ops = {
221443118c29SChuck Lever 	.set_buffer_size	= xs_udp_set_buffer_size,
221512a80469SChuck Lever 	.reserve_xprt		= xprt_reserve_xprt_cong,
221649e9a890SChuck Lever 	.release_xprt		= xprt_release_xprt_cong,
221745160d62SChuck Lever 	.rpcbind		= rpcb_getport_async,
221892200412SChuck Lever 	.set_port		= xs_set_port,
22199903cd1cSChuck Lever 	.connect		= xs_connect,
222002107148SChuck Lever 	.buf_alloc		= rpc_malloc,
222102107148SChuck Lever 	.buf_free		= rpc_free,
2222262965f5SChuck Lever 	.send_request		= xs_udp_send_request,
2223fe3aca29SChuck Lever 	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
222446c0ee8bSChuck Lever 	.timer			= xs_udp_timer,
2225a58dd398SChuck Lever 	.release_request	= xprt_release_rqst_cong,
2226262965f5SChuck Lever 	.close			= xs_close,
2227262965f5SChuck Lever 	.destroy		= xs_destroy,
2228262ca07dSChuck Lever 	.print_stats		= xs_udp_print_stats,
2229262965f5SChuck Lever };
2230262965f5SChuck Lever 
2231262965f5SChuck Lever static struct rpc_xprt_ops xs_tcp_ops = {
223212a80469SChuck Lever 	.reserve_xprt		= xprt_reserve_xprt,
2233e0ab53deSTrond Myklebust 	.release_xprt		= xs_tcp_release_xprt,
223445160d62SChuck Lever 	.rpcbind		= rpcb_getport_async,
223592200412SChuck Lever 	.set_port		= xs_set_port,
2236*0b9e7943STrond Myklebust 	.connect		= xs_connect,
223702107148SChuck Lever 	.buf_alloc		= rpc_malloc,
223802107148SChuck Lever 	.buf_free		= rpc_free,
2239262965f5SChuck Lever 	.send_request		= xs_tcp_send_request,
2240fe3aca29SChuck Lever 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
2241f75e6745STrond Myklebust 	.close			= xs_tcp_close,
22429903cd1cSChuck Lever 	.destroy		= xs_destroy,
2243262ca07dSChuck Lever 	.print_stats		= xs_tcp_print_stats,
2244a246b010SChuck Lever };
2245a246b010SChuck Lever 
22464cfc7e60SRahul Iyer /*
22474cfc7e60SRahul Iyer  * The rpc_xprt_ops for the server backchannel
22484cfc7e60SRahul Iyer  */
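/*
 * Note that, unlike xs_tcp_ops above, no .connect, .rpcbind or .set_port
 * methods are provided: the backchannel never sets up a connection of its
 * own but reuses the forechannel socket wired up in xs_setup_bc_tcp()
 * below.
 */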
22494cfc7e60SRahul Iyer 
22504cfc7e60SRahul Iyer static struct rpc_xprt_ops bc_tcp_ops = {
22514cfc7e60SRahul Iyer 	.reserve_xprt		= xprt_reserve_xprt,
22524cfc7e60SRahul Iyer 	.release_xprt		= xprt_release_xprt,
22534cfc7e60SRahul Iyer 	.buf_alloc		= bc_malloc,
22544cfc7e60SRahul Iyer 	.buf_free		= bc_free,
22554cfc7e60SRahul Iyer 	.send_request		= bc_send_request,
22564cfc7e60SRahul Iyer 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
22574cfc7e60SRahul Iyer 	.close			= bc_close,
22584cfc7e60SRahul Iyer 	.destroy		= bc_destroy,
22594cfc7e60SRahul Iyer 	.print_stats		= xs_tcp_print_stats,
22604cfc7e60SRahul Iyer };
22614cfc7e60SRahul Iyer 
22623c341b0bS\"Talpey, Thomas\ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2263bc25571eS\"Talpey, Thomas\ 				      unsigned int slot_table_size)
2264c8541ecdSChuck Lever {
2265c8541ecdSChuck Lever 	struct rpc_xprt *xprt;
2266ffc2e518SChuck Lever 	struct sock_xprt *new;
2267c8541ecdSChuck Lever 
226896802a09SFrank van Maarseveen 	if (args->addrlen > sizeof(xprt->addr)) {
2269c8541ecdSChuck Lever 		dprintk("RPC:       xs_setup_xprt: address too large\n");
2270c8541ecdSChuck Lever 		return ERR_PTR(-EBADF);
2271c8541ecdSChuck Lever 	}
2272c8541ecdSChuck Lever 
2273ffc2e518SChuck Lever 	new = kzalloc(sizeof(*new), GFP_KERNEL);
2274ffc2e518SChuck Lever 	if (new == NULL) {
227546121cf7SChuck Lever 		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
227646121cf7SChuck Lever 				"rpc_xprt\n");
2277c8541ecdSChuck Lever 		return ERR_PTR(-ENOMEM);
2278c8541ecdSChuck Lever 	}
2279ffc2e518SChuck Lever 	xprt = &new->xprt;
2280c8541ecdSChuck Lever 
2281c8541ecdSChuck Lever 	xprt->max_reqs = slot_table_size;
2282c8541ecdSChuck Lever 	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
2283c8541ecdSChuck Lever 	if (xprt->slot == NULL) {
2284c8541ecdSChuck Lever 		kfree(xprt);
228546121cf7SChuck Lever 		dprintk("RPC:       xs_setup_xprt: couldn't allocate slot "
228646121cf7SChuck Lever 				"table\n");
2287c8541ecdSChuck Lever 		return ERR_PTR(-ENOMEM);
2288c8541ecdSChuck Lever 	}
2289c8541ecdSChuck Lever 
229096802a09SFrank van Maarseveen 	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
229196802a09SFrank van Maarseveen 	xprt->addrlen = args->addrlen;
2292d3bc9a1dSFrank van Maarseveen 	if (args->srcaddr)
2293fbfffbd5SChuck Lever 		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2294c8541ecdSChuck Lever 
2295c8541ecdSChuck Lever 	return xprt;
2296c8541ecdSChuck Lever }
2297c8541ecdSChuck Lever 
22982881ae74STrond Myklebust static const struct rpc_timeout xs_udp_default_timeout = {
22992881ae74STrond Myklebust 	.to_initval = 5 * HZ,
23002881ae74STrond Myklebust 	.to_maxval = 30 * HZ,
23012881ae74STrond Myklebust 	.to_increment = 5 * HZ,
23022881ae74STrond Myklebust 	.to_retries = 5,
23032881ae74STrond Myklebust };
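
/*
 * Rough illustration of the values above (the UDP transport may shorten
 * them via the round-trip estimator installed through .set_retrans_timeout):
 * the first retransmit fires after roughly 5 seconds, each later one backs
 * off by a further 5 seconds up to the 30 second cap, and the request hits
 * a major timeout after 5 retries.
 */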
23042881ae74STrond Myklebust 
23059903cd1cSChuck Lever /**
23069903cd1cSChuck Lever  * xs_setup_udp - Set up transport to use a UDP socket
230796802a09SFrank van Maarseveen  * @args: rpc transport creation arguments
23089903cd1cSChuck Lever  *
23099903cd1cSChuck Lever  */
2310483066d6SAdrian Bunk static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2311a246b010SChuck Lever {
23128f9d5b1aSChuck Lever 	struct sockaddr *addr = args->dstaddr;
2313c8541ecdSChuck Lever 	struct rpc_xprt *xprt;
2314c8475461SChuck Lever 	struct sock_xprt *transport;
2315a246b010SChuck Lever 
231696802a09SFrank van Maarseveen 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
2317c8541ecdSChuck Lever 	if (IS_ERR(xprt))
2318c8541ecdSChuck Lever 		return xprt;
2319c8475461SChuck Lever 	transport = container_of(xprt, struct sock_xprt, xprt);
2320a246b010SChuck Lever 
2321ec739ef0SChuck Lever 	xprt->prot = IPPROTO_UDP;
2322808012fbSChuck Lever 	xprt->tsh_size = 0;
2323a246b010SChuck Lever 	/* XXX: header size can vary due to auth type, IPv6, etc. */
2324a246b010SChuck Lever 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2325a246b010SChuck Lever 
232603bf4b70SChuck Lever 	xprt->bind_timeout = XS_BIND_TO;
232703bf4b70SChuck Lever 	xprt->connect_timeout = XS_UDP_CONN_TO;
232803bf4b70SChuck Lever 	xprt->reestablish_timeout = XS_UDP_REEST_TO;
232903bf4b70SChuck Lever 	xprt->idle_timeout = XS_IDLE_DISC_TO;
2330a246b010SChuck Lever 
2331262965f5SChuck Lever 	xprt->ops = &xs_udp_ops;
2332a246b010SChuck Lever 
2333ba7392bbSTrond Myklebust 	xprt->timeout = &xs_udp_default_timeout;
2334a246b010SChuck Lever 
23358f9d5b1aSChuck Lever 	switch (addr->sa_family) {
23368f9d5b1aSChuck Lever 	case AF_INET:
23378f9d5b1aSChuck Lever 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
23388f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
23398f9d5b1aSChuck Lever 
23408f9d5b1aSChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
23418f9d5b1aSChuck Lever 					xs_udp_connect_worker4);
23429dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
23438f9d5b1aSChuck Lever 		break;
23448f9d5b1aSChuck Lever 	case AF_INET6:
23458f9d5b1aSChuck Lever 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
23468f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
23478f9d5b1aSChuck Lever 
23488f9d5b1aSChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
23498f9d5b1aSChuck Lever 					xs_udp_connect_worker6);
23509dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
23518f9d5b1aSChuck Lever 		break;
23528f9d5b1aSChuck Lever 	default:
23538f9d5b1aSChuck Lever 		kfree(xprt);
23548f9d5b1aSChuck Lever 		return ERR_PTR(-EAFNOSUPPORT);
23558f9d5b1aSChuck Lever 	}
23568f9d5b1aSChuck Lever 
2357c740eff8SChuck Lever 	if (xprt_bound(xprt))
2358c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2359c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2360c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PORT],
2361c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2362c740eff8SChuck Lever 	else
2363c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2364c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2365c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2366edb267a6SChuck Lever 
2367bc25571eS\"Talpey, Thomas\ 	if (try_module_get(THIS_MODULE))
2368c8541ecdSChuck Lever 		return xprt;
2369bc25571eS\"Talpey, Thomas\ 
2370bc25571eS\"Talpey, Thomas\ 	kfree(xprt->slot);
2371bc25571eS\"Talpey, Thomas\ 	kfree(xprt);
2372bc25571eS\"Talpey, Thomas\ 	return ERR_PTR(-EINVAL);
2373a246b010SChuck Lever }
2374a246b010SChuck Lever 
23752881ae74STrond Myklebust static const struct rpc_timeout xs_tcp_default_timeout = {
23762881ae74STrond Myklebust 	.to_initval = 60 * HZ,
23772881ae74STrond Myklebust 	.to_maxval = 60 * HZ,
23782881ae74STrond Myklebust 	.to_retries = 2,
23792881ae74STrond Myklebust };
23802881ae74STrond Myklebust 
23819903cd1cSChuck Lever /**
23829903cd1cSChuck Lever  * xs_setup_tcp - Set up transport to use a TCP socket
238396802a09SFrank van Maarseveen  * @args: rpc transport creation arguments
23849903cd1cSChuck Lever  *
23859903cd1cSChuck Lever  */
2386483066d6SAdrian Bunk static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2387a246b010SChuck Lever {
23888f9d5b1aSChuck Lever 	struct sockaddr *addr = args->dstaddr;
2389c8541ecdSChuck Lever 	struct rpc_xprt *xprt;
2390c8475461SChuck Lever 	struct sock_xprt *transport;
2391a246b010SChuck Lever 
239296802a09SFrank van Maarseveen 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2393c8541ecdSChuck Lever 	if (IS_ERR(xprt))
2394c8541ecdSChuck Lever 		return xprt;
2395c8475461SChuck Lever 	transport = container_of(xprt, struct sock_xprt, xprt);
2396a246b010SChuck Lever 
2397ec739ef0SChuck Lever 	xprt->prot = IPPROTO_TCP;
2398808012fbSChuck Lever 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2399808012fbSChuck Lever 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2400a246b010SChuck Lever 
240103bf4b70SChuck Lever 	xprt->bind_timeout = XS_BIND_TO;
240203bf4b70SChuck Lever 	xprt->connect_timeout = XS_TCP_CONN_TO;
240303bf4b70SChuck Lever 	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
240403bf4b70SChuck Lever 	xprt->idle_timeout = XS_IDLE_DISC_TO;
2405a246b010SChuck Lever 
2406262965f5SChuck Lever 	xprt->ops = &xs_tcp_ops;
2407ba7392bbSTrond Myklebust 	xprt->timeout = &xs_tcp_default_timeout;
2408a246b010SChuck Lever 
24098f9d5b1aSChuck Lever 	switch (addr->sa_family) {
24108f9d5b1aSChuck Lever 	case AF_INET:
24118f9d5b1aSChuck Lever 		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
24128f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
24138f9d5b1aSChuck Lever 
24149dc3b095SChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
24159dc3b095SChuck Lever 					xs_tcp_connect_worker4);
24169dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
24178f9d5b1aSChuck Lever 		break;
24188f9d5b1aSChuck Lever 	case AF_INET6:
24198f9d5b1aSChuck Lever 		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
24208f9d5b1aSChuck Lever 			xprt_set_bound(xprt);
24218f9d5b1aSChuck Lever 
24229dc3b095SChuck Lever 		INIT_DELAYED_WORK(&transport->connect_worker,
24239dc3b095SChuck Lever 					xs_tcp_connect_worker6);
24249dc3b095SChuck Lever 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
24258f9d5b1aSChuck Lever 		break;
24268f9d5b1aSChuck Lever 	default:
24278f9d5b1aSChuck Lever 		kfree(xprt);
24288f9d5b1aSChuck Lever 		return ERR_PTR(-EAFNOSUPPORT);
24298f9d5b1aSChuck Lever 	}
24308f9d5b1aSChuck Lever 
2431c740eff8SChuck Lever 	if (xprt_bound(xprt))
2432c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2433c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2434c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PORT],
2435c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2436c740eff8SChuck Lever 	else
2437c740eff8SChuck Lever 		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2438c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_ADDR],
2439c740eff8SChuck Lever 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2440c740eff8SChuck Lever 
2441edb267a6SChuck Lever 
2442bc25571eS\"Talpey, Thomas\ 	if (try_module_get(THIS_MODULE))
2443c8541ecdSChuck Lever 		return xprt;
2444bc25571eS\"Talpey, Thomas\ 
2445bc25571eS\"Talpey, Thomas\ 	kfree(xprt->slot);
2446bc25571eS\"Talpey, Thomas\ 	kfree(xprt);
2447bc25571eS\"Talpey, Thomas\ 	return ERR_PTR(-EINVAL);
2448a246b010SChuck Lever }
2449282b32e1SChuck Lever 
2450f300babaSAlexandros Batsakis /**
2451f300babaSAlexandros Batsakis  * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2452f300babaSAlexandros Batsakis  * @args: rpc transport creation arguments
2453f300babaSAlexandros Batsakis  *
2454f300babaSAlexandros Batsakis  */
2455f300babaSAlexandros Batsakis static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2456f300babaSAlexandros Batsakis {
2457f300babaSAlexandros Batsakis 	struct sockaddr *addr = args->dstaddr;
2458f300babaSAlexandros Batsakis 	struct rpc_xprt *xprt;
2459f300babaSAlexandros Batsakis 	struct sock_xprt *transport;
2460f300babaSAlexandros Batsakis 	struct svc_sock *bc_sock;
2461f300babaSAlexandros Batsakis 
2462f300babaSAlexandros Batsakis 	if (!args->bc_xprt)
2463f300babaSAlexandros Batsakis 		return ERR_PTR(-EINVAL);
2464f300babaSAlexandros Batsakis 
2465f300babaSAlexandros Batsakis 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2466f300babaSAlexandros Batsakis 	if (IS_ERR(xprt))
2467f300babaSAlexandros Batsakis 		return xprt;
2468f300babaSAlexandros Batsakis 	transport = container_of(xprt, struct sock_xprt, xprt);
2469f300babaSAlexandros Batsakis 
2470f300babaSAlexandros Batsakis 	xprt->prot = IPPROTO_TCP;
2471f300babaSAlexandros Batsakis 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2472f300babaSAlexandros Batsakis 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2473f300babaSAlexandros Batsakis 	xprt->timeout = &xs_tcp_default_timeout;
2474f300babaSAlexandros Batsakis 
2475f300babaSAlexandros Batsakis 	/* backchannel */
2476f300babaSAlexandros Batsakis 	xprt_set_bound(xprt);
2477f300babaSAlexandros Batsakis 	xprt->bind_timeout = 0;
2478f300babaSAlexandros Batsakis 	xprt->connect_timeout = 0;
2479f300babaSAlexandros Batsakis 	xprt->reestablish_timeout = 0;
2480f300babaSAlexandros Batsakis 	xprt->idle_timeout = 0;
2481f300babaSAlexandros Batsakis 
2482f300babaSAlexandros Batsakis 	/*
2483f300babaSAlexandros Batsakis 	 * The backchannel uses the same socket connection as the
2484f300babaSAlexandros Batsakis 	 * forechannel
2485f300babaSAlexandros Batsakis 	 */
2486f300babaSAlexandros Batsakis 	xprt->bc_xprt = args->bc_xprt;
2487f300babaSAlexandros Batsakis 	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
2488f300babaSAlexandros Batsakis 	bc_sock->sk_bc_xprt = xprt;
2489f300babaSAlexandros Batsakis 	transport->sock = bc_sock->sk_sock;
2490f300babaSAlexandros Batsakis 	transport->inet = bc_sock->sk_sk;
2491f300babaSAlexandros Batsakis 
2492f300babaSAlexandros Batsakis 	xprt->ops = &bc_tcp_ops;
2493f300babaSAlexandros Batsakis 
2494f300babaSAlexandros Batsakis 	switch (addr->sa_family) {
2495f300babaSAlexandros Batsakis 	case AF_INET:
2496f300babaSAlexandros Batsakis 		xs_format_peer_addresses(xprt, "tcp",
2497f300babaSAlexandros Batsakis 					 RPCBIND_NETID_TCP);
2498f300babaSAlexandros Batsakis 		break;
2499f300babaSAlexandros Batsakis 	case AF_INET6:
2500f300babaSAlexandros Batsakis 		xs_format_peer_addresses(xprt, "tcp",
2501f300babaSAlexandros Batsakis 				   RPCBIND_NETID_TCP6);
2502f300babaSAlexandros Batsakis 		break;
2503f300babaSAlexandros Batsakis 	default:
2504f300babaSAlexandros Batsakis 		kfree(xprt);
2505f300babaSAlexandros Batsakis 		return ERR_PTR(-EAFNOSUPPORT);
2506f300babaSAlexandros Batsakis 	}
2507f300babaSAlexandros Batsakis 
2508f300babaSAlexandros Batsakis 	if (xprt_bound(xprt))
2509f300babaSAlexandros Batsakis 		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2510f300babaSAlexandros Batsakis 				xprt->address_strings[RPC_DISPLAY_ADDR],
2511f300babaSAlexandros Batsakis 				xprt->address_strings[RPC_DISPLAY_PORT],
2512f300babaSAlexandros Batsakis 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2513f300babaSAlexandros Batsakis 	else
2514f300babaSAlexandros Batsakis 		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2515f300babaSAlexandros Batsakis 				xprt->address_strings[RPC_DISPLAY_ADDR],
2516f300babaSAlexandros Batsakis 				xprt->address_strings[RPC_DISPLAY_PROTO]);
2517f300babaSAlexandros Batsakis 
2518f300babaSAlexandros Batsakis 	/*
2519f300babaSAlexandros Batsakis 	 * Since we don't want connections for the backchannel, we set
2520f300babaSAlexandros Batsakis 	 * the xprt status to connected
2521f300babaSAlexandros Batsakis 	 */
2522f300babaSAlexandros Batsakis 	xprt_set_connected(xprt);
2523f300babaSAlexandros Batsakis 
2524f300babaSAlexandros Batsakis 
2525f300babaSAlexandros Batsakis 	if (try_module_get(THIS_MODULE))
2526f300babaSAlexandros Batsakis 		return xprt;
2527f300babaSAlexandros Batsakis 	kfree(xprt->slot);
2528f300babaSAlexandros Batsakis 	kfree(xprt);
2529f300babaSAlexandros Batsakis 	return ERR_PTR(-EINVAL);
2530f300babaSAlexandros Batsakis }
2531f300babaSAlexandros Batsakis 
2532bc25571eS\"Talpey, Thomas\ static struct xprt_class	xs_udp_transport = {
2533bc25571eS\"Talpey, Thomas\ 	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
2534bc25571eS\"Talpey, Thomas\ 	.name		= "udp",
2535bc25571eS\"Talpey, Thomas\ 	.owner		= THIS_MODULE,
2536f300babaSAlexandros Batsakis 	.ident		= XPRT_TRANSPORT_UDP,
2537bc25571eS\"Talpey, Thomas\ 	.setup		= xs_setup_udp,
2538bc25571eS\"Talpey, Thomas\ };
2539bc25571eS\"Talpey, Thomas\ 
2540bc25571eS\"Talpey, Thomas\ static struct xprt_class	xs_tcp_transport = {
2541bc25571eS\"Talpey, Thomas\ 	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
2542bc25571eS\"Talpey, Thomas\ 	.name		= "tcp",
2543bc25571eS\"Talpey, Thomas\ 	.owner		= THIS_MODULE,
2544f300babaSAlexandros Batsakis 	.ident		= XPRT_TRANSPORT_TCP,
2545bc25571eS\"Talpey, Thomas\ 	.setup		= xs_setup_tcp,
2546bc25571eS\"Talpey, Thomas\ };
2547bc25571eS\"Talpey, Thomas\ 
2548f300babaSAlexandros Batsakis static struct xprt_class	xs_bc_tcp_transport = {
2549f300babaSAlexandros Batsakis 	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
2550f300babaSAlexandros Batsakis 	.name		= "tcp NFSv4.1 backchannel",
2551f300babaSAlexandros Batsakis 	.owner		= THIS_MODULE,
2552f300babaSAlexandros Batsakis 	.ident		= XPRT_TRANSPORT_BC_TCP,
2553f300babaSAlexandros Batsakis 	.setup		= xs_setup_bc_tcp,
2554f300babaSAlexandros Batsakis };
2555f300babaSAlexandros Batsakis 
2556282b32e1SChuck Lever /**
2557bc25571eS\"Talpey, Thomas\  * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
2558282b32e1SChuck Lever  *
2559282b32e1SChuck Lever  */
2560282b32e1SChuck Lever int init_socket_xprt(void)
2561282b32e1SChuck Lever {
2562fbf76683SChuck Lever #ifdef RPC_DEBUG
25632b1bec5fSEric W. Biederman 	if (!sunrpc_table_header)
25640b4d4147SEric W. Biederman 		sunrpc_table_header = register_sysctl_table(sunrpc_table);
2565fbf76683SChuck Lever #endif
2566fbf76683SChuck Lever 
2567bc25571eS\"Talpey, Thomas\ 	xprt_register_transport(&xs_udp_transport);
2568bc25571eS\"Talpey, Thomas\ 	xprt_register_transport(&xs_tcp_transport);
2569f300babaSAlexandros Batsakis 	xprt_register_transport(&xs_bc_tcp_transport);
2570bc25571eS\"Talpey, Thomas\ 
2571282b32e1SChuck Lever 	return 0;
2572282b32e1SChuck Lever }
2573282b32e1SChuck Lever 
2574282b32e1SChuck Lever /**
2575bc25571eS\"Talpey, Thomas\  * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
2576282b32e1SChuck Lever  *
2577282b32e1SChuck Lever  */
2578282b32e1SChuck Lever void cleanup_socket_xprt(void)
2579282b32e1SChuck Lever {
2580fbf76683SChuck Lever #ifdef RPC_DEBUG
2581fbf76683SChuck Lever 	if (sunrpc_table_header) {
2582fbf76683SChuck Lever 		unregister_sysctl_table(sunrpc_table_header);
2583fbf76683SChuck Lever 		sunrpc_table_header = NULL;
2584fbf76683SChuck Lever 	}
2585fbf76683SChuck Lever #endif
2586bc25571eS\"Talpey, Thomas\ 
2587bc25571eS\"Talpey, Thomas\ 	xprt_unregister_transport(&xs_udp_transport);
2588bc25571eS\"Talpey, Thomas\ 	xprt_unregister_transport(&xs_tcp_transport);
2589f300babaSAlexandros Batsakis 	xprt_unregister_transport(&xs_bc_tcp_transport);
2590282b32e1SChuck Lever }
2591cbf11071STrond Myklebust 
2592cbf11071STrond Myklebust static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
2593cbf11071STrond Myklebust 		unsigned int min, unsigned int max)
2594cbf11071STrond Myklebust {
2595cbf11071STrond Myklebust 	unsigned long num;
2596cbf11071STrond Myklebust 	int ret;
2597cbf11071STrond Myklebust 
2598cbf11071STrond Myklebust 	if (!val)
2599cbf11071STrond Myklebust 		return -EINVAL;
2600cbf11071STrond Myklebust 	ret = strict_strtoul(val, 0, &num);
2601cbf11071STrond Myklebust 	if (ret == -EINVAL || num < min || num > max)
2602cbf11071STrond Myklebust 		return -EINVAL;
2603cbf11071STrond Myklebust 	*((unsigned int *)kp->arg) = num;
2604cbf11071STrond Myklebust 	return 0;
2605cbf11071STrond Myklebust }
2606cbf11071STrond Myklebust 
2607cbf11071STrond Myklebust static int param_set_portnr(const char *val, struct kernel_param *kp)
2608cbf11071STrond Myklebust {
2609cbf11071STrond Myklebust 	return param_set_uint_minmax(val, kp,
2610cbf11071STrond Myklebust 			RPC_MIN_RESVPORT,
2611cbf11071STrond Myklebust 			RPC_MAX_RESVPORT);
2612cbf11071STrond Myklebust }
2613cbf11071STrond Myklebust 
2614cbf11071STrond Myklebust static int param_get_portnr(char *buffer, struct kernel_param *kp)
2615cbf11071STrond Myklebust {
2616cbf11071STrond Myklebust 	return param_get_uint(buffer, kp);
2617cbf11071STrond Myklebust }
2618cbf11071STrond Myklebust #define param_check_portnr(name, p) \
2619cbf11071STrond Myklebust 	__param_check(name, p, unsigned int);
2620cbf11071STrond Myklebust 
2621cbf11071STrond Myklebust module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
2622cbf11071STrond Myklebust module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
2623cbf11071STrond Myklebust 
2624cbf11071STrond Myklebust static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
2625cbf11071STrond Myklebust {
2626cbf11071STrond Myklebust 	return param_set_uint_minmax(val, kp,
2627cbf11071STrond Myklebust 			RPC_MIN_SLOT_TABLE,
2628cbf11071STrond Myklebust 			RPC_MAX_SLOT_TABLE);
2629cbf11071STrond Myklebust }
2630cbf11071STrond Myklebust 
2631cbf11071STrond Myklebust static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
2632cbf11071STrond Myklebust {
2633cbf11071STrond Myklebust 	return param_get_uint(buffer, kp);
2634cbf11071STrond Myklebust }
2635cbf11071STrond Myklebust #define param_check_slot_table_size(name, p) \
2636cbf11071STrond Myklebust 	__param_check(name, p, unsigned int);
2637cbf11071STrond Myklebust 
2638cbf11071STrond Myklebust module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
2639cbf11071STrond Myklebust 		   slot_table_size, 0644);
2640cbf11071STrond Myklebust module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
2641cbf11071STrond Myklebust 		   slot_table_size, 0644);
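
/*
 * These knobs are typically tuned either at module load time (for example
 * "modprobe sunrpc udp_slot_table_entries=32") or at run time through
 * /sys/module/sunrpc/parameters/; the setters above reject values outside
 * the RPC_MIN/RPC_MAX bounds with -EINVAL.
 */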
2642cbf11071STrond Myklebust 
2643