xref: /openbmc/linux/net/sunrpc/svc_xprt.c (revision 66c898ca)
11d8206b9STom Tucker /*
21d8206b9STom Tucker  * linux/net/sunrpc/svc_xprt.c
31d8206b9STom Tucker  *
41d8206b9STom Tucker  * Author: Tom Tucker <tom@opengridcomputing.com>
51d8206b9STom Tucker  */
61d8206b9STom Tucker 
71d8206b9STom Tucker #include <linux/sched.h>
81d8206b9STom Tucker #include <linux/errno.h>
91d8206b9STom Tucker #include <linux/freezer.h>
107086721fSJeff Layton #include <linux/kthread.h>
115a0e3ad6STejun Heo #include <linux/slab.h>
121d8206b9STom Tucker #include <net/sock.h>
13c3d4879eSScott Mayhew #include <linux/sunrpc/addr.h>
141d8206b9STom Tucker #include <linux/sunrpc/stats.h>
151d8206b9STom Tucker #include <linux/sunrpc/svc_xprt.h>
16dcf1a357SH Hartley Sweeten #include <linux/sunrpc/svcsock.h>
1799de8ea9SJ. Bruce Fields #include <linux/sunrpc/xprt.h>
183a9a231dSPaul Gortmaker #include <linux/module.h>
19c3d4879eSScott Mayhew #include <linux/netdevice.h>
20860a0d9eSJeff Layton #include <trace/events/sunrpc.h>
211d8206b9STom Tucker 
221d8206b9STom Tucker #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
231d8206b9STom Tucker 
24ff3ac5c3STrond Myklebust static unsigned int svc_rpc_per_connection_limit __read_mostly;
25ff3ac5c3STrond Myklebust module_param(svc_rpc_per_connection_limit, uint, 0644);
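/*
 * Note (editorial, not from the original source): a value of 0, the
 * default, means no per-connection limit is applied (see
 * svc_xprt_slots_in_range() below). Since this is a writable module
 * parameter of the sunrpc module, it can normally be adjusted at runtime
 * under /sys/module/sunrpc/parameters/, though that exact path is an
 * assumption about the build configuration.
 */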
26ff3ac5c3STrond Myklebust 
27ff3ac5c3STrond Myklebust 
280f0257eaSTom Tucker static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
290f0257eaSTom Tucker static int svc_deferred_recv(struct svc_rqst *rqstp);
300f0257eaSTom Tucker static struct cache_deferred_req *svc_defer(struct cache_req *req);
31ff861c4dSKees Cook static void svc_age_temp_xprts(struct timer_list *t);
327710ec36SJ. Bruce Fields static void svc_delete_xprt(struct svc_xprt *xprt);
330f0257eaSTom Tucker 
340f0257eaSTom Tucker /* apparently the "standard" is that clients close
350f0257eaSTom Tucker  * idle connections after 5 minutes, servers after
360f0257eaSTom Tucker  * 6 minutes
370f0257eaSTom Tucker  *   http://www.connectathon.org/talks96/nfstcp.pdf
380f0257eaSTom Tucker  */
390f0257eaSTom Tucker static int svc_conn_age_period = 6*60;
400f0257eaSTom Tucker 
411d8206b9STom Tucker /* List of registered transport classes */
421d8206b9STom Tucker static DEFINE_SPINLOCK(svc_xprt_class_lock);
431d8206b9STom Tucker static LIST_HEAD(svc_xprt_class_list);
441d8206b9STom Tucker 
450f0257eaSTom Tucker /* SMP locking strategy:
460f0257eaSTom Tucker  *
470f0257eaSTom Tucker  *	svc_pool->sp_lock protects most of the fields of that pool.
480f0257eaSTom Tucker  *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
490f0257eaSTom Tucker  *	when both need to be taken (rare), svc_serv->sv_lock is first.
503c519914SJeff Layton  *	The "service mutex" protects svc_serv->sv_nrthreads.
510f0257eaSTom Tucker  *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
520f0257eaSTom Tucker  *             and the ->sk_info_authunix cache.
530f0257eaSTom Tucker  *
540f0257eaSTom Tucker  *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport from being
550f0257eaSTom Tucker  *	enqueued more than once. During normal transport processing this bit
560f0257eaSTom Tucker  *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
570f0257eaSTom Tucker  *	Providers should not manipulate this bit directly.
580f0257eaSTom Tucker  *
590f0257eaSTom Tucker  *	Some flags can be set to certain values at any time
600f0257eaSTom Tucker  *	providing that certain rules are followed:
610f0257eaSTom Tucker  *
620f0257eaSTom Tucker  *	XPT_CONN, XPT_DATA:
630f0257eaSTom Tucker  *		- Can be set or cleared at any time.
640f0257eaSTom Tucker  *		- After a set, svc_xprt_enqueue must be called to enqueue
650f0257eaSTom Tucker  *		  the transport for processing.
660f0257eaSTom Tucker  *		- After a clear, the transport must be read/accepted.
670f0257eaSTom Tucker  *		  If this succeeds, it must be set again.
680f0257eaSTom Tucker  *	XPT_CLOSE:
690f0257eaSTom Tucker  *		- Can be set at any time. It is never cleared.
700f0257eaSTom Tucker  *      XPT_DEAD:
710f0257eaSTom Tucker  *		- Can only be set while XPT_BUSY is held which ensures
720f0257eaSTom Tucker  *		  that no other thread will be using the transport or will
730f0257eaSTom Tucker  *		  try to set XPT_DEAD.
740f0257eaSTom Tucker  */
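/*
 * Illustrative sketch (editorial, not part of the original file):
 * following the XPT_DATA rule above, a transport provider that notices
 * newly arrived data sets the bit and then enqueues the transport. The
 * foo_find_xprt() helper below is hypothetical:
 *
 *	struct svc_xprt *xprt = foo_find_xprt(sk);
 *
 *	set_bit(XPT_DATA, &xprt->xpt_flags);
 *	svc_xprt_enqueue(xprt);
 */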
751d8206b9STom Tucker int svc_reg_xprt_class(struct svc_xprt_class *xcl)
761d8206b9STom Tucker {
771d8206b9STom Tucker 	struct svc_xprt_class *cl;
781d8206b9STom Tucker 	int res = -EEXIST;
791d8206b9STom Tucker 
801d8206b9STom Tucker 	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);
811d8206b9STom Tucker 
821d8206b9STom Tucker 	INIT_LIST_HEAD(&xcl->xcl_list);
831d8206b9STom Tucker 	spin_lock(&svc_xprt_class_lock);
841d8206b9STom Tucker 	/* Make sure there isn't already a class with the same name */
851d8206b9STom Tucker 	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
861d8206b9STom Tucker 		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
871d8206b9STom Tucker 			goto out;
881d8206b9STom Tucker 	}
891d8206b9STom Tucker 	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
901d8206b9STom Tucker 	res = 0;
911d8206b9STom Tucker out:
921d8206b9STom Tucker 	spin_unlock(&svc_xprt_class_lock);
931d8206b9STom Tucker 	return res;
941d8206b9STom Tucker }
951d8206b9STom Tucker EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
961d8206b9STom Tucker 
971d8206b9STom Tucker void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
981d8206b9STom Tucker {
991d8206b9STom Tucker 	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
1001d8206b9STom Tucker 	spin_lock(&svc_xprt_class_lock);
1011d8206b9STom Tucker 	list_del_init(&xcl->xcl_list);
1021d8206b9STom Tucker 	spin_unlock(&svc_xprt_class_lock);
1031d8206b9STom Tucker }
1041d8206b9STom Tucker EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
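/*
 * Illustrative sketch (editorial, not part of the original file): a
 * transport provider registers its class once at module load and
 * unregisters it on unload. All "foo" names are hypothetical; the fields
 * shown (xcl_name, xcl_owner, xcl_ops, xcl_max_payload) are the ones
 * used elsewhere in this file:
 *
 *	static struct svc_xprt_class foo_xprt_class = {
 *		.xcl_name	 = "foo",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &foo_xprt_ops,
 *		.xcl_max_payload = FOO_MAX_PAYLOAD,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return svc_reg_xprt_class(&foo_xprt_class);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		svc_unreg_xprt_class(&foo_xprt_class);
 *	}
 */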
1051d8206b9STom Tucker 
106dc9a16e4STom Tucker /*
107dc9a16e4STom Tucker  * Format the transport list for printing
108dc9a16e4STom Tucker  */
109dc9a16e4STom Tucker int svc_print_xprts(char *buf, int maxlen)
110dc9a16e4STom Tucker {
1118f3a6de3SPavel Emelyanov 	struct svc_xprt_class *xcl;
112dc9a16e4STom Tucker 	char tmpstr[80];
113dc9a16e4STom Tucker 	int len = 0;
114dc9a16e4STom Tucker 	buf[0] = '\0';
115dc9a16e4STom Tucker 
116dc9a16e4STom Tucker 	spin_lock(&svc_xprt_class_lock);
1178f3a6de3SPavel Emelyanov 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
118dc9a16e4STom Tucker 		int slen;
119dc9a16e4STom Tucker 
120dc9a16e4STom Tucker 		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
121dc9a16e4STom Tucker 		slen = strlen(tmpstr);
122dc9a16e4STom Tucker 		if (len + slen > maxlen)
123dc9a16e4STom Tucker 			break;
124dc9a16e4STom Tucker 		len += slen;
125dc9a16e4STom Tucker 		strcat(buf, tmpstr);
126dc9a16e4STom Tucker 	}
127dc9a16e4STom Tucker 	spin_unlock(&svc_xprt_class_lock);
128dc9a16e4STom Tucker 
129dc9a16e4STom Tucker 	return len;
130dc9a16e4STom Tucker }
131dc9a16e4STom Tucker 
132e1b3157fSTom Tucker static void svc_xprt_free(struct kref *kref)
133e1b3157fSTom Tucker {
134e1b3157fSTom Tucker 	struct svc_xprt *xprt =
135e1b3157fSTom Tucker 		container_of(kref, struct svc_xprt, xpt_ref);
136e1b3157fSTom Tucker 	struct module *owner = xprt->xpt_class->xcl_owner;
137e3bfca01SPavel Emelyanov 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
138e3bfca01SPavel Emelyanov 		svcauth_unix_info_release(xprt);
1394fb8518bSPavel Emelyanov 	put_net(xprt->xpt_net);
14099de8ea9SJ. Bruce Fields 	/* See comment on corresponding get in xs_setup_bc_tcp(): */
14199de8ea9SJ. Bruce Fields 	if (xprt->xpt_bc_xprt)
14299de8ea9SJ. Bruce Fields 		xprt_put(xprt->xpt_bc_xprt);
14339a9beabSJ. Bruce Fields 	if (xprt->xpt_bc_xps)
14439a9beabSJ. Bruce Fields 		xprt_switch_put(xprt->xpt_bc_xps);
145e1b3157fSTom Tucker 	xprt->xpt_ops->xpo_free(xprt);
146e1b3157fSTom Tucker 	module_put(owner);
147e1b3157fSTom Tucker }
148e1b3157fSTom Tucker 
149e1b3157fSTom Tucker void svc_xprt_put(struct svc_xprt *xprt)
150e1b3157fSTom Tucker {
151e1b3157fSTom Tucker 	kref_put(&xprt->xpt_ref, svc_xprt_free);
152e1b3157fSTom Tucker }
153e1b3157fSTom Tucker EXPORT_SYMBOL_GPL(svc_xprt_put);
154e1b3157fSTom Tucker 
1551d8206b9STom Tucker /*
1561d8206b9STom Tucker  * Called by transport drivers to initialize the transport independent
1571d8206b9STom Tucker  * portion of the transport instance.
1581d8206b9STom Tucker  */
159bd4620ddSStanislav Kinsbursky void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
160bd4620ddSStanislav Kinsbursky 		   struct svc_xprt *xprt, struct svc_serv *serv)
1611d8206b9STom Tucker {
1621d8206b9STom Tucker 	memset(xprt, 0, sizeof(*xprt));
1631d8206b9STom Tucker 	xprt->xpt_class = xcl;
1641d8206b9STom Tucker 	xprt->xpt_ops = xcl->xcl_ops;
165e1b3157fSTom Tucker 	kref_init(&xprt->xpt_ref);
166bb5cf160STom Tucker 	xprt->xpt_server = serv;
1677a182083STom Tucker 	INIT_LIST_HEAD(&xprt->xpt_list);
1687a182083STom Tucker 	INIT_LIST_HEAD(&xprt->xpt_ready);
1698c7b0172STom Tucker 	INIT_LIST_HEAD(&xprt->xpt_deferred);
170edc7a894SJ. Bruce Fields 	INIT_LIST_HEAD(&xprt->xpt_users);
171a50fea26STom Tucker 	mutex_init(&xprt->xpt_mutex);
172def13d74STom Tucker 	spin_lock_init(&xprt->xpt_lock);
1734e5caaa5STom Tucker 	set_bit(XPT_BUSY, &xprt->xpt_flags);
174bd4620ddSStanislav Kinsbursky 	xprt->xpt_net = get_net(net);
175ece200ddSChuck Lever 	strcpy(xprt->xpt_remotebuf, "uninitialized");
1761d8206b9STom Tucker }
1771d8206b9STom Tucker EXPORT_SYMBOL_GPL(svc_xprt_init);
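/*
 * Illustrative sketch (editorial, not part of the original file): a
 * provider's xpo_create method typically allocates a private structure
 * that embeds a struct svc_xprt, passes the embedded xprt to
 * svc_xprt_init(), and only then does its protocol-specific setup. The
 * "foo" names are hypothetical; the argument order matches the
 * xpo_create call made from __svc_xpo_create() below:
 *
 *	static struct svc_xprt *foo_create(struct svc_serv *serv,
 *					   struct net *net,
 *					   struct sockaddr *sa, int salen,
 *					   int flags)
 *	{
 *		struct foo_xprt *fx = kzalloc(sizeof(*fx), GFP_KERNEL);
 *
 *		if (!fx)
 *			return ERR_PTR(-ENOMEM);
 *		svc_xprt_init(net, &foo_xprt_class, &fx->fx_xprt, serv);
 *		...protocol-specific setup...
 *		return &fx->fx_xprt;
 *	}
 */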
178b700cbb1STom Tucker 
1795dd248f6SChuck Lever static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
1805dd248f6SChuck Lever 					 struct svc_serv *serv,
18162832c03SPavel Emelyanov 					 struct net *net,
1829652ada3SChuck Lever 					 const int family,
1839652ada3SChuck Lever 					 const unsigned short port,
1849652ada3SChuck Lever 					 int flags)
185b700cbb1STom Tucker {
186b700cbb1STom Tucker 	struct sockaddr_in sin = {
187b700cbb1STom Tucker 		.sin_family		= AF_INET,
188e6f1cebfSAl Viro 		.sin_addr.s_addr	= htonl(INADDR_ANY),
189b700cbb1STom Tucker 		.sin_port		= htons(port),
190b700cbb1STom Tucker 	};
191dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1925dd248f6SChuck Lever 	struct sockaddr_in6 sin6 = {
1935dd248f6SChuck Lever 		.sin6_family		= AF_INET6,
1945dd248f6SChuck Lever 		.sin6_addr		= IN6ADDR_ANY_INIT,
1955dd248f6SChuck Lever 		.sin6_port		= htons(port),
1965dd248f6SChuck Lever 	};
197dfd56b8bSEric Dumazet #endif
1985dd248f6SChuck Lever 	struct sockaddr *sap;
1995dd248f6SChuck Lever 	size_t len;
2005dd248f6SChuck Lever 
2019652ada3SChuck Lever 	switch (family) {
2029652ada3SChuck Lever 	case PF_INET:
2035dd248f6SChuck Lever 		sap = (struct sockaddr *)&sin;
2045dd248f6SChuck Lever 		len = sizeof(sin);
2055dd248f6SChuck Lever 		break;
206dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
2079652ada3SChuck Lever 	case PF_INET6:
2085dd248f6SChuck Lever 		sap = (struct sockaddr *)&sin6;
2095dd248f6SChuck Lever 		len = sizeof(sin6);
2105dd248f6SChuck Lever 		break;
211dfd56b8bSEric Dumazet #endif
2125dd248f6SChuck Lever 	default:
2135dd248f6SChuck Lever 		return ERR_PTR(-EAFNOSUPPORT);
2145dd248f6SChuck Lever 	}
2155dd248f6SChuck Lever 
21662832c03SPavel Emelyanov 	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
2175dd248f6SChuck Lever }
2185dd248f6SChuck Lever 
2196741019cSJ. Bruce Fields /*
2206741019cSJ. Bruce Fields  * svc_xprt_received conditionally queues the transport for processing
2216741019cSJ. Bruce Fields  * by another thread. The caller must hold the XPT_BUSY bit and must
2226741019cSJ. Bruce Fields  * not thereafter touch transport data.
2236741019cSJ. Bruce Fields  *
2246741019cSJ. Bruce Fields  * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
2256741019cSJ. Bruce Fields  * insufficient) data.
2266741019cSJ. Bruce Fields  */
2276741019cSJ. Bruce Fields static void svc_xprt_received(struct svc_xprt *xprt)
2286741019cSJ. Bruce Fields {
229acf06a7fSJeff Layton 	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
230acf06a7fSJeff Layton 		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
231ff1fdb9bSWeston Andros Adamson 		return;
232acf06a7fSJeff Layton 	}
233acf06a7fSJeff Layton 
2346741019cSJ. Bruce Fields 	/* As soon as we clear busy, the xprt could be closed and
235b9e13cdfSJeff Layton 	 * 'put', so we need a reference to call svc_enqueue_xprt with:
2366741019cSJ. Bruce Fields 	 */
2376741019cSJ. Bruce Fields 	svc_xprt_get(xprt);
2380971374eSTrond Myklebust 	smp_mb__before_atomic();
2396741019cSJ. Bruce Fields 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
240b9e13cdfSJeff Layton 	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
2416741019cSJ. Bruce Fields 	svc_xprt_put(xprt);
2426741019cSJ. Bruce Fields }
2436741019cSJ. Bruce Fields 
24439b55301SJ. Bruce Fields void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
24539b55301SJ. Bruce Fields {
24639b55301SJ. Bruce Fields 	clear_bit(XPT_TEMP, &new->xpt_flags);
24739b55301SJ. Bruce Fields 	spin_lock_bh(&serv->sv_lock);
24839b55301SJ. Bruce Fields 	list_add(&new->xpt_list, &serv->sv_permsocks);
24939b55301SJ. Bruce Fields 	spin_unlock_bh(&serv->sv_lock);
25039b55301SJ. Bruce Fields 	svc_xprt_received(new);
25139b55301SJ. Bruce Fields }
25239b55301SJ. Bruce Fields 
253da36e6dbSColin Ian King static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
254fc5d00b0SPavel Emelyanov 			    struct net *net, const int family,
255fc5d00b0SPavel Emelyanov 			    const unsigned short port, int flags)
2565dd248f6SChuck Lever {
2575dd248f6SChuck Lever 	struct svc_xprt_class *xcl;
2585dd248f6SChuck Lever 
259b700cbb1STom Tucker 	spin_lock(&svc_xprt_class_lock);
260b700cbb1STom Tucker 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
261b700cbb1STom Tucker 		struct svc_xprt *newxprt;
262ed2849d3SNeilBrown 		unsigned short newport;
2634e5caaa5STom Tucker 
2644e5caaa5STom Tucker 		if (strcmp(xprt_name, xcl->xcl_name))
2654e5caaa5STom Tucker 			continue;
2664e5caaa5STom Tucker 
2674e5caaa5STom Tucker 		if (!try_module_get(xcl->xcl_owner))
2684e5caaa5STom Tucker 			goto err;
2694e5caaa5STom Tucker 
2704e5caaa5STom Tucker 		spin_unlock(&svc_xprt_class_lock);
27162832c03SPavel Emelyanov 		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
272b700cbb1STom Tucker 		if (IS_ERR(newxprt)) {
273b700cbb1STom Tucker 			module_put(xcl->xcl_owner);
2744e5caaa5STom Tucker 			return PTR_ERR(newxprt);
275b700cbb1STom Tucker 		}
27639b55301SJ. Bruce Fields 		svc_add_new_perm_xprt(serv, newxprt);
277ed2849d3SNeilBrown 		newport = svc_xprt_local_port(newxprt);
278ed2849d3SNeilBrown 		return newport;
279b700cbb1STom Tucker 	}
2804e5caaa5STom Tucker  err:
281b700cbb1STom Tucker 	spin_unlock(&svc_xprt_class_lock);
28268717908SChuck Lever 	/* This errno is exposed to user space.  Provide a reasonable
28368717908SChuck Lever 	 * perror msg for a bad transport. */
28468717908SChuck Lever 	return -EPROTONOSUPPORT;
285b700cbb1STom Tucker }
286d96b9c93SJ. Bruce Fields 
287d96b9c93SJ. Bruce Fields int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
288d96b9c93SJ. Bruce Fields 		    struct net *net, const int family,
289d96b9c93SJ. Bruce Fields 		    const unsigned short port, int flags)
290d96b9c93SJ. Bruce Fields {
291d96b9c93SJ. Bruce Fields 	int err;
292d96b9c93SJ. Bruce Fields 
293d96b9c93SJ. Bruce Fields 	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
294d96b9c93SJ. Bruce Fields 	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
295d96b9c93SJ. Bruce Fields 	if (err == -EPROTONOSUPPORT) {
296d96b9c93SJ. Bruce Fields 		request_module("svc%s", xprt_name);
297d96b9c93SJ. Bruce Fields 		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
298d96b9c93SJ. Bruce Fields 	}
2999ac31288SVasily Averin 	if (err < 0)
300d96b9c93SJ. Bruce Fields 		dprintk("svc: transport %s not found, err %d\n",
3019ac31288SVasily Averin 			xprt_name, -err);
302d96b9c93SJ. Bruce Fields 	return err;
303d96b9c93SJ. Bruce Fields }
304b700cbb1STom Tucker EXPORT_SYMBOL_GPL(svc_create_xprt);
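/*
 * Illustrative sketch (editorial, not part of the original file): a
 * service creates its permanent listeners with calls along these lines;
 * the port and flag values are examples only:
 *
 *	err = svc_create_xprt(serv, "tcp", net, PF_INET, 2049,
 *			      SVC_SOCK_DEFAULTS);
 *	if (err < 0)
 *		goto out_err;
 *
 * On success the return value is the bound local port, which matters
 * when port 0 was requested.
 */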
3059dbc240fSTom Tucker 
3069dbc240fSTom Tucker /*
3079dbc240fSTom Tucker  * Copy the local and remote xprt addresses to the rqstp structure
3089dbc240fSTom Tucker  */
3099dbc240fSTom Tucker void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
3109dbc240fSTom Tucker {
3119dbc240fSTom Tucker 	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
3129dbc240fSTom Tucker 	rqstp->rq_addrlen = xprt->xpt_remotelen;
3139dbc240fSTom Tucker 
3149dbc240fSTom Tucker 	/*
3159dbc240fSTom Tucker 	 * Destination address in request is needed for binding the
3169dbc240fSTom Tucker 	 * source address in RPC replies/callbacks later.
3179dbc240fSTom Tucker 	 */
318849a1cf1SMi Jinlong 	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
319849a1cf1SMi Jinlong 	rqstp->rq_daddrlen = xprt->xpt_locallen;
3209dbc240fSTom Tucker }
3219dbc240fSTom Tucker EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
3229dbc240fSTom Tucker 
3230f0257eaSTom Tucker /**
3240f0257eaSTom Tucker  * svc_print_addr - Format rq_addr field for printing
3250f0257eaSTom Tucker  * @rqstp: svc_rqst struct containing address to print
3260f0257eaSTom Tucker  * @buf: target buffer for formatted address
3270f0257eaSTom Tucker  * @len: length of target buffer
3280f0257eaSTom Tucker  *
3290f0257eaSTom Tucker  */
3300f0257eaSTom Tucker char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
3310f0257eaSTom Tucker {
3320f0257eaSTom Tucker 	return __svc_print_addr(svc_addr(rqstp), buf, len);
3330f0257eaSTom Tucker }
3340f0257eaSTom Tucker EXPORT_SYMBOL_GPL(svc_print_addr);
3350f0257eaSTom Tucker 
336ff3ac5c3STrond Myklebust static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
337ff3ac5c3STrond Myklebust {
338ff3ac5c3STrond Myklebust 	unsigned int limit = svc_rpc_per_connection_limit;
339ff3ac5c3STrond Myklebust 	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);
340ff3ac5c3STrond Myklebust 
341ff3ac5c3STrond Myklebust 	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
342ff3ac5c3STrond Myklebust }
343ff3ac5c3STrond Myklebust 
344ff3ac5c3STrond Myklebust static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
345ff3ac5c3STrond Myklebust {
346ff3ac5c3STrond Myklebust 	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
347ff3ac5c3STrond Myklebust 		if (!svc_xprt_slots_in_range(xprt))
348ff3ac5c3STrond Myklebust 			return false;
349ff3ac5c3STrond Myklebust 		atomic_inc(&xprt->xpt_nr_rqsts);
350ff3ac5c3STrond Myklebust 		set_bit(RQ_DATA, &rqstp->rq_flags);
351ff3ac5c3STrond Myklebust 	}
352ff3ac5c3STrond Myklebust 	return true;
353ff3ac5c3STrond Myklebust }
354ff3ac5c3STrond Myklebust 
355ff3ac5c3STrond Myklebust static void svc_xprt_release_slot(struct svc_rqst *rqstp)
356ff3ac5c3STrond Myklebust {
357ff3ac5c3STrond Myklebust 	struct svc_xprt	*xprt = rqstp->rq_xprt;
358ff3ac5c3STrond Myklebust 	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
359ff3ac5c3STrond Myklebust 		atomic_dec(&xprt->xpt_nr_rqsts);
360ff3ac5c3STrond Myklebust 		svc_xprt_enqueue(xprt);
361ff3ac5c3STrond Myklebust 	}
362ff3ac5c3STrond Myklebust }
363ff3ac5c3STrond Myklebust 
36466c898caSJ. Bruce Fields static bool svc_xprt_ready(struct svc_xprt *xprt)
3659c335c0bSJ. Bruce Fields {
3661602a7b7STrond Myklebust 	unsigned long xpt_flags;
3671602a7b7STrond Myklebust 
3681602a7b7STrond Myklebust 	xpt_flags = READ_ONCE(xprt->xpt_flags);
3691602a7b7STrond Myklebust 
3701602a7b7STrond Myklebust 	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
3719c335c0bSJ. Bruce Fields 		return true;
3721602a7b7STrond Myklebust 	if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
373ff3ac5c3STrond Myklebust 		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
374ff3ac5c3STrond Myklebust 		    svc_xprt_slots_in_range(xprt))
37582ea2d76STrond Myklebust 			return true;
37682ea2d76STrond Myklebust 		trace_svc_xprt_no_write_space(xprt);
37782ea2d76STrond Myklebust 		return false;
37882ea2d76STrond Myklebust 	}
3799c335c0bSJ. Bruce Fields 	return false;
3809c335c0bSJ. Bruce Fields }
3819c335c0bSJ. Bruce Fields 
382b9e13cdfSJeff Layton void svc_xprt_do_enqueue(struct svc_xprt *xprt)
3830f0257eaSTom Tucker {
3840f0257eaSTom Tucker 	struct svc_pool *pool;
38583a712e0SJeff Layton 	struct svc_rqst	*rqstp = NULL;
3860f0257eaSTom Tucker 	int cpu;
3870f0257eaSTom Tucker 
38866c898caSJ. Bruce Fields 	if (!svc_xprt_ready(xprt))
3897dbb53baSChuck Lever 		return;
3900f0257eaSTom Tucker 
3910f0257eaSTom Tucker 	/* Mark transport as busy. It will remain in this state until
3920f0257eaSTom Tucker 	 * the provider calls svc_xprt_received. We update XPT_BUSY
3930f0257eaSTom Tucker 	 * atomically because it also guards against trying to enqueue
3940f0257eaSTom Tucker 	 * the transport twice.
3950f0257eaSTom Tucker 	 */
3967dbb53baSChuck Lever 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
3977dbb53baSChuck Lever 		return;
3980f0257eaSTom Tucker 
3990c0746d0STrond Myklebust 	cpu = get_cpu();
4000c0746d0STrond Myklebust 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
4010c0746d0STrond Myklebust 
402403c7b44SJeff Layton 	atomic_long_inc(&pool->sp_stats.packets);
4030c0746d0STrond Myklebust 
404b1691bc0SJeff Layton 	spin_lock_bh(&pool->sp_lock);
405b1691bc0SJeff Layton 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
406b1691bc0SJeff Layton 	pool->sp_stats.sockets_queued++;
4070f0257eaSTom Tucker 	spin_unlock_bh(&pool->sp_lock);
40822700f3cSTrond Myklebust 
40922700f3cSTrond Myklebust 	/* find a thread for this xprt */
41022700f3cSTrond Myklebust 	rcu_read_lock();
41122700f3cSTrond Myklebust 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
41222700f3cSTrond Myklebust 		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
41322700f3cSTrond Myklebust 			continue;
41422700f3cSTrond Myklebust 		atomic_long_inc(&pool->sp_stats.threads_woken);
41555f5088cSChuck Lever 		rqstp->rq_qtime = ktime_get();
41622700f3cSTrond Myklebust 		wake_up_process(rqstp->rq_task);
41722700f3cSTrond Myklebust 		goto out_unlock;
418b1691bc0SJeff Layton 	}
41922700f3cSTrond Myklebust 	set_bit(SP_CONGESTED, &pool->sp_flags);
42083a712e0SJeff Layton 	rqstp = NULL;
42122700f3cSTrond Myklebust out_unlock:
42222700f3cSTrond Myklebust 	rcu_read_unlock();
423983c6844STrond Myklebust 	put_cpu();
42483a712e0SJeff Layton 	trace_svc_xprt_do_enqueue(xprt, rqstp);
4250f0257eaSTom Tucker }
426b9e13cdfSJeff Layton EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
4270971374eSTrond Myklebust 
4280971374eSTrond Myklebust /*
4290971374eSTrond Myklebust  * Queue up a transport with data pending. If there are idle nfsd
4300971374eSTrond Myklebust  * processes, wake 'em up.
4310971374eSTrond Myklebust  *
4320971374eSTrond Myklebust  */
4330971374eSTrond Myklebust void svc_xprt_enqueue(struct svc_xprt *xprt)
4340971374eSTrond Myklebust {
4350971374eSTrond Myklebust 	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
4360971374eSTrond Myklebust 		return;
437b9e13cdfSJeff Layton 	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
4380971374eSTrond Myklebust }
4390f0257eaSTom Tucker EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
4400f0257eaSTom Tucker 
4410f0257eaSTom Tucker /*
442b1691bc0SJeff Layton  * Dequeue the first transport, if there is one.
4430f0257eaSTom Tucker  */
4440f0257eaSTom Tucker static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
4450f0257eaSTom Tucker {
446b1691bc0SJeff Layton 	struct svc_xprt	*xprt = NULL;
4470f0257eaSTom Tucker 
4480f0257eaSTom Tucker 	if (list_empty(&pool->sp_sockets))
44983a712e0SJeff Layton 		goto out;
4500f0257eaSTom Tucker 
451b1691bc0SJeff Layton 	spin_lock_bh(&pool->sp_lock);
452b1691bc0SJeff Layton 	if (likely(!list_empty(&pool->sp_sockets))) {
453b1691bc0SJeff Layton 		xprt = list_first_entry(&pool->sp_sockets,
4540f0257eaSTom Tucker 					struct svc_xprt, xpt_ready);
4550f0257eaSTom Tucker 		list_del_init(&xprt->xpt_ready);
456b1691bc0SJeff Layton 		svc_xprt_get(xprt);
457b1691bc0SJeff Layton 	}
458b1691bc0SJeff Layton 	spin_unlock_bh(&pool->sp_lock);
45983a712e0SJeff Layton out:
4600f0257eaSTom Tucker 	return xprt;
4610f0257eaSTom Tucker }
4620f0257eaSTom Tucker 
4630f0257eaSTom Tucker /**
4640f0257eaSTom Tucker  * svc_reserve - change the space reserved for the reply to a request.
4650f0257eaSTom Tucker  * @rqstp:  The request in question
4660f0257eaSTom Tucker  * @space: new max space to reserve
4670f0257eaSTom Tucker  *
4680f0257eaSTom Tucker  * Each request reserves some space on the output queue of the transport
4690f0257eaSTom Tucker  * to make sure the reply fits.  This function reduces that reserved
4700f0257eaSTom Tucker  * space to be the amount of space used already, plus @space.
4710f0257eaSTom Tucker  *
4720f0257eaSTom Tucker  */
4730f0257eaSTom Tucker void svc_reserve(struct svc_rqst *rqstp, int space)
4740f0257eaSTom Tucker {
475d4b09acfSVasily Averin 	struct svc_xprt *xprt = rqstp->rq_xprt;
476d4b09acfSVasily Averin 
4770f0257eaSTom Tucker 	space += rqstp->rq_res.head[0].iov_len;
4780f0257eaSTom Tucker 
479d4b09acfSVasily Averin 	if (xprt && space < rqstp->rq_reserved) {
4800f0257eaSTom Tucker 		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
4810f0257eaSTom Tucker 		rqstp->rq_reserved = space;
4820f0257eaSTom Tucker 
4830f0257eaSTom Tucker 		svc_xprt_enqueue(xprt);
4840f0257eaSTom Tucker 	}
4850f0257eaSTom Tucker }
48624c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(svc_reserve);
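/*
 * Illustrative sketch (editorial, not part of the original file): a
 * request handler that knows early on that its reply will be small can
 * give back most of its reservation so other requests are not needlessly
 * throttled by the transport's write-space check, e.g.:
 *
 *	svc_reserve(rqstp, 512);
 */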
4870f0257eaSTom Tucker 
4880f0257eaSTom Tucker static void svc_xprt_release(struct svc_rqst *rqstp)
4890f0257eaSTom Tucker {
4900f0257eaSTom Tucker 	struct svc_xprt	*xprt = rqstp->rq_xprt;
4910f0257eaSTom Tucker 
49263a1b156SChuck Lever 	xprt->xpt_ops->xpo_release_rqst(rqstp);
4930f0257eaSTom Tucker 
4942779e3aeSTom Tucker 	kfree(rqstp->rq_deferred);
4952779e3aeSTom Tucker 	rqstp->rq_deferred = NULL;
4962779e3aeSTom Tucker 
4970f0257eaSTom Tucker 	svc_free_res_pages(rqstp);
4980f0257eaSTom Tucker 	rqstp->rq_res.page_len = 0;
4990f0257eaSTom Tucker 	rqstp->rq_res.page_base = 0;
5000f0257eaSTom Tucker 
5010f0257eaSTom Tucker 	/* Reset response buffer and release
5020f0257eaSTom Tucker 	 * the reservation.
5030f0257eaSTom Tucker 	 * But first, check that enough space was reserved
5040f0257eaSTom Tucker 	 * for the reply, otherwise we have a bug!
5050f0257eaSTom Tucker 	 */
5060f0257eaSTom Tucker 	if ((rqstp->rq_res.len) >  rqstp->rq_reserved)
5070f0257eaSTom Tucker 		printk(KERN_ERR "RPC request reserved %d but used %d\n",
5080f0257eaSTom Tucker 		       rqstp->rq_reserved,
5090f0257eaSTom Tucker 		       rqstp->rq_res.len);
5100f0257eaSTom Tucker 
5110f0257eaSTom Tucker 	rqstp->rq_res.head[0].iov_len = 0;
5120f0257eaSTom Tucker 	svc_reserve(rqstp, 0);
513ff3ac5c3STrond Myklebust 	svc_xprt_release_slot(rqstp);
5140f0257eaSTom Tucker 	rqstp->rq_xprt = NULL;
5150f0257eaSTom Tucker 	svc_xprt_put(xprt);
5160f0257eaSTom Tucker }
5170f0257eaSTom Tucker 
5180f0257eaSTom Tucker /*
519ceff739cSJeff Layton  * Some svc_serv's will have occasional work to do, even when an xprt is not
520ceff739cSJeff Layton  * waiting to be serviced. This function is there to "kick" a task in one of
521ceff739cSJeff Layton  * those services so that it can wake up and do that work. Note that we only
522ceff739cSJeff Layton  * bother with pool 0 as we don't need to wake up more than one thread for
523ceff739cSJeff Layton  * this purpose.
5240f0257eaSTom Tucker  */
5250f0257eaSTom Tucker void svc_wake_up(struct svc_serv *serv)
5260f0257eaSTom Tucker {
5270f0257eaSTom Tucker 	struct svc_rqst	*rqstp;
5280f0257eaSTom Tucker 	struct svc_pool *pool;
5290f0257eaSTom Tucker 
530ceff739cSJeff Layton 	pool = &serv->sv_pools[0];
5310f0257eaSTom Tucker 
532b1691bc0SJeff Layton 	rcu_read_lock();
533b1691bc0SJeff Layton 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
534b1691bc0SJeff Layton 		/* skip any that aren't queued */
535b1691bc0SJeff Layton 		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
536b1691bc0SJeff Layton 			continue;
537b1691bc0SJeff Layton 		rcu_read_unlock();
538983c6844STrond Myklebust 		wake_up_process(rqstp->rq_task);
53983a712e0SJeff Layton 		trace_svc_wake_up(rqstp->rq_task->pid);
540b1691bc0SJeff Layton 		return;
541b1691bc0SJeff Layton 	}
542b1691bc0SJeff Layton 	rcu_read_unlock();
543b1691bc0SJeff Layton 
544b1691bc0SJeff Layton 	/* No free entries available */
5454d5db3f5SJeff Layton 	set_bit(SP_TASK_PENDING, &pool->sp_flags);
546b1691bc0SJeff Layton 	smp_wmb();
54783a712e0SJeff Layton 	trace_svc_wake_up(0);
5480f0257eaSTom Tucker }
54924c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(svc_wake_up);
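/*
 * Illustrative sketch (editorial, not part of the original file): a
 * caller that has queued internal work for the service would follow a
 * pattern like this, with the "foo" names hypothetical:
 *
 *	list_add_tail(&work->w_list, &foo_work_queue);
 *	svc_wake_up(foo_serv);
 *
 * The woken thread returns from svc_recv() (typically with -EAGAIN) and
 * the service's own loop can then pick up the queued work.
 */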
5500f0257eaSTom Tucker 
5510f0257eaSTom Tucker int svc_port_is_privileged(struct sockaddr *sin)
5520f0257eaSTom Tucker {
5530f0257eaSTom Tucker 	switch (sin->sa_family) {
5540f0257eaSTom Tucker 	case AF_INET:
5550f0257eaSTom Tucker 		return ntohs(((struct sockaddr_in *)sin)->sin_port)
5560f0257eaSTom Tucker 			< PROT_SOCK;
5570f0257eaSTom Tucker 	case AF_INET6:
5580f0257eaSTom Tucker 		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
5590f0257eaSTom Tucker 			< PROT_SOCK;
5600f0257eaSTom Tucker 	default:
5610f0257eaSTom Tucker 		return 0;
5620f0257eaSTom Tucker 	}
5630f0257eaSTom Tucker }
5640f0257eaSTom Tucker 
5650f0257eaSTom Tucker /*
566c9233eb7SJeff Layton  * Make sure that we don't have too many active connections. If we have,
567c9233eb7SJeff Layton  * something must be dropped. It's not clear what will happen if we allow
568c9233eb7SJeff Layton  * "too many" connections, but when dealing with network-facing software,
569c9233eb7SJeff Layton  * we have to code defensively. Here we do that by imposing hard limits.
5700f0257eaSTom Tucker  *
5710f0257eaSTom Tucker  * There's no point in trying to do random drop here for DoS
5720f0257eaSTom Tucker  * prevention. The NFS client does one reconnect every 15 seconds. An
5730f0257eaSTom Tucker  * attacker can easily beat that.
5740f0257eaSTom Tucker  *
5750f0257eaSTom Tucker  * The only somewhat efficient mechanism would be to drop old
5760f0257eaSTom Tucker  * connections from the same IP first. But right now we don't even
5770f0257eaSTom Tucker  * record the client IP in svc_sock.
578c9233eb7SJeff Layton  *
579c9233eb7SJeff Layton  * Single-threaded services that expect a lot of clients will probably
580c9233eb7SJeff Layton  * need to set sv_maxconn to override the default value, which is based
581c9233eb7SJeff Layton  * on the number of threads.
5820f0257eaSTom Tucker  */
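/*
 * Worked example (editorial, not from the original source): with
 * sv_maxconn unset and 8 server threads, the limit computed below is
 * (8 + 3) * 20 = 220 temporary connections before the oldest one is
 * queued for closing.
 */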
5830f0257eaSTom Tucker static void svc_check_conn_limits(struct svc_serv *serv)
5840f0257eaSTom Tucker {
585c9233eb7SJeff Layton 	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
586c9233eb7SJeff Layton 				(serv->sv_nrthreads+3) * 20;
587c9233eb7SJeff Layton 
588c9233eb7SJeff Layton 	if (serv->sv_tmpcnt > limit) {
5890f0257eaSTom Tucker 		struct svc_xprt *xprt = NULL;
5900f0257eaSTom Tucker 		spin_lock_bh(&serv->sv_lock);
5910f0257eaSTom Tucker 		if (!list_empty(&serv->sv_tempsocks)) {
5920f0257eaSTom Tucker 			/* Try to help the admin */
593e87cc472SJoe Perches 			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
594c9233eb7SJeff Layton 					       serv->sv_name, serv->sv_maxconn ?
595e87cc472SJoe Perches 					       "max number of connections" :
596e87cc472SJoe Perches 					       "number of threads");
5970f0257eaSTom Tucker 			/*
5980f0257eaSTom Tucker 			 * Always select the oldest connection. It's not fair,
5990f0257eaSTom Tucker 			 * but so is life
6000f0257eaSTom Tucker 			 */
6010f0257eaSTom Tucker 			xprt = list_entry(serv->sv_tempsocks.prev,
6020f0257eaSTom Tucker 					  struct svc_xprt,
6030f0257eaSTom Tucker 					  xpt_list);
6040f0257eaSTom Tucker 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
6050f0257eaSTom Tucker 			svc_xprt_get(xprt);
6060f0257eaSTom Tucker 		}
6070f0257eaSTom Tucker 		spin_unlock_bh(&serv->sv_lock);
6080f0257eaSTom Tucker 
6090f0257eaSTom Tucker 		if (xprt) {
6100f0257eaSTom Tucker 			svc_xprt_enqueue(xprt);
6110f0257eaSTom Tucker 			svc_xprt_put(xprt);
6120f0257eaSTom Tucker 		}
6130f0257eaSTom Tucker 	}
6140f0257eaSTom Tucker }
6150f0257eaSTom Tucker 
616e1d83ee6SRashika Kheria static int svc_alloc_arg(struct svc_rqst *rqstp)
6170f0257eaSTom Tucker {
6180f0257eaSTom Tucker 	struct svc_serv *serv = rqstp->rq_server;
6190f0257eaSTom Tucker 	struct xdr_buf *arg;
6206797fa5aSJ. Bruce Fields 	int pages;
6216797fa5aSJ. Bruce Fields 	int i;
6220f0257eaSTom Tucker 
6230f0257eaSTom Tucker 	/* now allocate needed pages.  If we get a failure, sleep briefly */
6248c6ae498SChuck Lever 	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
6258c6ae498SChuck Lever 	if (pages > RPCSVC_MAXPAGES) {
6268c6ae498SChuck Lever 		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
6278c6ae498SChuck Lever 			     pages, RPCSVC_MAXPAGES);
628b25cd058SWeston Andros Adamson 		/* use as many pages as possible */
6298c6ae498SChuck Lever 		pages = RPCSVC_MAXPAGES;
6308c6ae498SChuck Lever 	}
6310f0257eaSTom Tucker 	for (i = 0; i < pages ; i++)
6320f0257eaSTom Tucker 		while (rqstp->rq_pages[i] == NULL) {
6330f0257eaSTom Tucker 			struct page *p = alloc_page(GFP_KERNEL);
6340f0257eaSTom Tucker 			if (!p) {
6357b54fe61SJeff Layton 				set_current_state(TASK_INTERRUPTIBLE);
6367b54fe61SJeff Layton 				if (signalled() || kthread_should_stop()) {
6377b54fe61SJeff Layton 					set_current_state(TASK_RUNNING);
6387086721fSJeff Layton 					return -EINTR;
6397b54fe61SJeff Layton 				}
6407b54fe61SJeff Layton 				schedule_timeout(msecs_to_jiffies(500));
6410f0257eaSTom Tucker 			}
6420f0257eaSTom Tucker 			rqstp->rq_pages[i] = p;
6430f0257eaSTom Tucker 		}
6442825a7f9SJ. Bruce Fields 	rqstp->rq_page_end = &rqstp->rq_pages[i];
6450f0257eaSTom Tucker 	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
6460f0257eaSTom Tucker 
6470f0257eaSTom Tucker 	/* Make arg->head point to first page and arg->pages point to rest */
6480f0257eaSTom Tucker 	arg = &rqstp->rq_arg;
6490f0257eaSTom Tucker 	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
6500f0257eaSTom Tucker 	arg->head[0].iov_len = PAGE_SIZE;
6510f0257eaSTom Tucker 	arg->pages = rqstp->rq_pages + 1;
6520f0257eaSTom Tucker 	arg->page_base = 0;
6530f0257eaSTom Tucker 	/* save at least one page for response */
6540f0257eaSTom Tucker 	arg->page_len = (pages-2)*PAGE_SIZE;
6550f0257eaSTom Tucker 	arg->len = (pages-1)*PAGE_SIZE;
6560f0257eaSTom Tucker 	arg->tail[0].iov_len = 0;
6576797fa5aSJ. Bruce Fields 	return 0;
6586797fa5aSJ. Bruce Fields }
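/*
 * Worked example (editorial, not from the original source): with 4KB
 * pages and sv_max_mesg of 1MB, svc_alloc_arg() computes
 * pages = (1048576 + 2 * 4096) / 4096 = 258: one page backs the head
 * iovec, 256 pages back the page list (page_len = 1048576), and one
 * page is held back for the reply.
 */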
6590f0257eaSTom Tucker 
660b1691bc0SJeff Layton static bool
661b1691bc0SJeff Layton rqst_should_sleep(struct svc_rqst *rqstp)
662b1691bc0SJeff Layton {
663b1691bc0SJeff Layton 	struct svc_pool		*pool = rqstp->rq_pool;
664b1691bc0SJeff Layton 
665b1691bc0SJeff Layton 	/* did someone call svc_wake_up? */
666b1691bc0SJeff Layton 	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
667b1691bc0SJeff Layton 		return false;
668b1691bc0SJeff Layton 
669b1691bc0SJeff Layton 	/* was a socket queued? */
670b1691bc0SJeff Layton 	if (!list_empty(&pool->sp_sockets))
671b1691bc0SJeff Layton 		return false;
672b1691bc0SJeff Layton 
673b1691bc0SJeff Layton 	/* are we shutting down? */
674b1691bc0SJeff Layton 	if (signalled() || kthread_should_stop())
675b1691bc0SJeff Layton 		return false;
676b1691bc0SJeff Layton 
677b1691bc0SJeff Layton 	/* are we freezing? */
678b1691bc0SJeff Layton 	if (freezing(current))
679b1691bc0SJeff Layton 		return false;
680b1691bc0SJeff Layton 
681b1691bc0SJeff Layton 	return true;
682b1691bc0SJeff Layton }
683b1691bc0SJeff Layton 
684e1d83ee6SRashika Kheria static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
6856797fa5aSJ. Bruce Fields {
6866797fa5aSJ. Bruce Fields 	struct svc_pool		*pool = rqstp->rq_pool;
687a4aa8054STrond Myklebust 	long			time_left = 0;
6880f0257eaSTom Tucker 
689b1691bc0SJeff Layton 	/* rq_xprt should be clear on entry */
690b1691bc0SJeff Layton 	WARN_ON_ONCE(rqstp->rq_xprt);
691b1691bc0SJeff Layton 
69222700f3cSTrond Myklebust 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
69322700f3cSTrond Myklebust 	if (rqstp->rq_xprt)
69422700f3cSTrond Myklebust 		goto out_found;
695b1691bc0SJeff Layton 
6960f0257eaSTom Tucker 	/*
6970f0257eaSTom Tucker 	 * We have to be able to interrupt this wait
6980f0257eaSTom Tucker 	 * to bring down the daemons ...
6990f0257eaSTom Tucker 	 */
7000f0257eaSTom Tucker 	set_current_state(TASK_INTERRUPTIBLE);
70122700f3cSTrond Myklebust 	smp_mb__before_atomic();
70222700f3cSTrond Myklebust 	clear_bit(SP_CONGESTED, &pool->sp_flags);
703b1691bc0SJeff Layton 	clear_bit(RQ_BUSY, &rqstp->rq_flags);
70422700f3cSTrond Myklebust 	smp_mb__after_atomic();
7057086721fSJeff Layton 
706b1691bc0SJeff Layton 	if (likely(rqst_should_sleep(rqstp)))
70703cf6c9fSGreg Banks 		time_left = schedule_timeout(timeout);
708b1691bc0SJeff Layton 	else
709983c6844STrond Myklebust 		__set_current_state(TASK_RUNNING);
7100f0257eaSTom Tucker 
7110f0257eaSTom Tucker 	try_to_freeze();
7120f0257eaSTom Tucker 
713b1691bc0SJeff Layton 	set_bit(RQ_BUSY, &rqstp->rq_flags);
71422700f3cSTrond Myklebust 	smp_mb__after_atomic();
71522700f3cSTrond Myklebust 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
71622700f3cSTrond Myklebust 	if (rqstp->rq_xprt)
71722700f3cSTrond Myklebust 		goto out_found;
718106f359cSTrond Myklebust 
71903cf6c9fSGreg Banks 	if (!time_left)
720403c7b44SJeff Layton 		atomic_long_inc(&pool->sp_stats.threads_timedout);
7210f0257eaSTom Tucker 
7227086721fSJeff Layton 	if (signalled() || kthread_should_stop())
7236797fa5aSJ. Bruce Fields 		return ERR_PTR(-EINTR);
7246797fa5aSJ. Bruce Fields 	return ERR_PTR(-EAGAIN);
72522700f3cSTrond Myklebust out_found:
72622700f3cSTrond Myklebust 	/* Normally we will wait up to 5 seconds for any required
72722700f3cSTrond Myklebust 	 * cache information to be provided.
72822700f3cSTrond Myklebust 	 */
72922700f3cSTrond Myklebust 	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
73022700f3cSTrond Myklebust 		rqstp->rq_chandle.thread_wait = 5*HZ;
73122700f3cSTrond Myklebust 	else
73222700f3cSTrond Myklebust 		rqstp->rq_chandle.thread_wait = 1*HZ;
73355f5088cSChuck Lever 	trace_svc_xprt_dequeue(rqstp);
73422700f3cSTrond Myklebust 	return rqstp->rq_xprt;
7350f0257eaSTom Tucker }
7360f0257eaSTom Tucker 
737e1d83ee6SRashika Kheria static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
73865b2e665SJ. Bruce Fields {
73965b2e665SJ. Bruce Fields 	spin_lock_bh(&serv->sv_lock);
74065b2e665SJ. Bruce Fields 	set_bit(XPT_TEMP, &newxpt->xpt_flags);
74165b2e665SJ. Bruce Fields 	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
74265b2e665SJ. Bruce Fields 	serv->sv_tmpcnt++;
74365b2e665SJ. Bruce Fields 	if (serv->sv_temptimer.function == NULL) {
74465b2e665SJ. Bruce Fields 		/* setup timer to age temp transports */
745841b86f3SKees Cook 		serv->sv_temptimer.function = svc_age_temp_xprts;
74665b2e665SJ. Bruce Fields 		mod_timer(&serv->sv_temptimer,
74765b2e665SJ. Bruce Fields 			  jiffies + svc_conn_age_period * HZ);
74865b2e665SJ. Bruce Fields 	}
74965b2e665SJ. Bruce Fields 	spin_unlock_bh(&serv->sv_lock);
75065b2e665SJ. Bruce Fields 	svc_xprt_received(newxpt);
75165b2e665SJ. Bruce Fields }
75265b2e665SJ. Bruce Fields 
7536797fa5aSJ. Bruce Fields static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
7546797fa5aSJ. Bruce Fields {
7556797fa5aSJ. Bruce Fields 	struct svc_serv *serv = rqstp->rq_server;
7566797fa5aSJ. Bruce Fields 	int len = 0;
7576797fa5aSJ. Bruce Fields 
7581b644b6eSJ. Bruce Fields 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
7591b644b6eSJ. Bruce Fields 		dprintk("svc_recv: found XPT_CLOSE\n");
760546125d1SScott Mayhew 		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
761546125d1SScott Mayhew 			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
7621b644b6eSJ. Bruce Fields 		svc_delete_xprt(xprt);
763ca7896cdSJ. Bruce Fields 		/* Leave XPT_BUSY set on the dead xprt: */
76483a712e0SJeff Layton 		goto out;
765ca7896cdSJ. Bruce Fields 	}
766ca7896cdSJ. Bruce Fields 	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
7670f0257eaSTom Tucker 		struct svc_xprt *newxpt;
7680f0257eaSTom Tucker 		/*
7690f0257eaSTom Tucker 		 * We know this module_get will succeed because the
7700f0257eaSTom Tucker 		 * listener holds a reference too
7710f0257eaSTom Tucker 		 */
77265b2e665SJ. Bruce Fields 		__module_get(xprt->xpt_class->xcl_owner);
7730f0257eaSTom Tucker 		svc_check_conn_limits(xprt->xpt_server);
77465b2e665SJ. Bruce Fields 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
77565b2e665SJ. Bruce Fields 		if (newxpt)
77665b2e665SJ. Bruce Fields 			svc_add_new_temp_xprt(serv, newxpt);
777c789102cSTrond Myklebust 		else
778c789102cSTrond Myklebust 			module_put(xprt->xpt_class->xcl_owner);
779ff3ac5c3STrond Myklebust 	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
7806797fa5aSJ. Bruce Fields 		/* XPT_DATA|XPT_DEFERRED case: */
7810f0257eaSTom Tucker 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
7826797fa5aSJ. Bruce Fields 			rqstp, rqstp->rq_pool->sp_id, xprt,
7832c935bc5SPeter Zijlstra 			kref_read(&xprt->xpt_ref));
7840f0257eaSTom Tucker 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
785ca7896cdSJ. Bruce Fields 		if (rqstp->rq_deferred)
7860f0257eaSTom Tucker 			len = svc_deferred_recv(rqstp);
787ca7896cdSJ. Bruce Fields 		else
7880f0257eaSTom Tucker 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
789aaba72cdSChuck Lever 		rqstp->rq_stime = ktime_get();
790d10f27a7SJ. Bruce Fields 		rqstp->rq_reserved = serv->sv_max_mesg;
791d10f27a7SJ. Bruce Fields 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
7920f0257eaSTom Tucker 	}
7936797fa5aSJ. Bruce Fields 	/* clear XPT_BUSY: */
794ca7896cdSJ. Bruce Fields 	svc_xprt_received(xprt);
79583a712e0SJeff Layton out:
79683a712e0SJeff Layton 	trace_svc_handle_xprt(xprt, len);
7976797fa5aSJ. Bruce Fields 	return len;
7986797fa5aSJ. Bruce Fields }
7996797fa5aSJ. Bruce Fields 
8006797fa5aSJ. Bruce Fields /*
8016797fa5aSJ. Bruce Fields  * Receive the next request on any transport.  This code is carefully
8026797fa5aSJ. Bruce Fields  * organised not to touch any cachelines in the shared svc_serv
8036797fa5aSJ. Bruce Fields  * structure, only cachelines in the local svc_pool.
8046797fa5aSJ. Bruce Fields  */
8056797fa5aSJ. Bruce Fields int svc_recv(struct svc_rqst *rqstp, long timeout)
8066797fa5aSJ. Bruce Fields {
8076797fa5aSJ. Bruce Fields 	struct svc_xprt		*xprt = NULL;
8086797fa5aSJ. Bruce Fields 	struct svc_serv		*serv = rqstp->rq_server;
8096797fa5aSJ. Bruce Fields 	int			len, err;
8106797fa5aSJ. Bruce Fields 
8116797fa5aSJ. Bruce Fields 	dprintk("svc: server %p waiting for data (to = %ld)\n",
8126797fa5aSJ. Bruce Fields 		rqstp, timeout);
8136797fa5aSJ. Bruce Fields 
8146797fa5aSJ. Bruce Fields 	if (rqstp->rq_xprt)
8156797fa5aSJ. Bruce Fields 		printk(KERN_ERR
8166797fa5aSJ. Bruce Fields 			"svc_recv: service %p, transport not NULL!\n",
8176797fa5aSJ. Bruce Fields 			 rqstp);
818983c6844STrond Myklebust 
8196797fa5aSJ. Bruce Fields 	err = svc_alloc_arg(rqstp);
8206797fa5aSJ. Bruce Fields 	if (err)
821860a0d9eSJeff Layton 		goto out;
8226797fa5aSJ. Bruce Fields 
8236797fa5aSJ. Bruce Fields 	try_to_freeze();
8246797fa5aSJ. Bruce Fields 	cond_resched();
825860a0d9eSJeff Layton 	err = -EINTR;
8266797fa5aSJ. Bruce Fields 	if (signalled() || kthread_should_stop())
827860a0d9eSJeff Layton 		goto out;
8286797fa5aSJ. Bruce Fields 
8296797fa5aSJ. Bruce Fields 	xprt = svc_get_next_xprt(rqstp, timeout);
830860a0d9eSJeff Layton 	if (IS_ERR(xprt)) {
831860a0d9eSJeff Layton 		err = PTR_ERR(xprt);
832860a0d9eSJeff Layton 		goto out;
833860a0d9eSJeff Layton 	}
8346797fa5aSJ. Bruce Fields 
8356797fa5aSJ. Bruce Fields 	len = svc_handle_xprt(rqstp, xprt);
8360f0257eaSTom Tucker 
8370f0257eaSTom Tucker 	/* No data, incomplete (TCP) read, or accept() */
838860a0d9eSJeff Layton 	err = -EAGAIN;
8399f9d2ebeSJ. Bruce Fields 	if (len <= 0)
840860a0d9eSJeff Layton 		goto out_release;
841ca7896cdSJ. Bruce Fields 
8420f0257eaSTom Tucker 	clear_bit(XPT_OLD, &xprt->xpt_flags);
8430f0257eaSTom Tucker 
844989f881eSChuck Lever 	xprt->xpt_ops->xpo_secure_port(rqstp);
8450f0257eaSTom Tucker 	rqstp->rq_chandle.defer = svc_defer;
846860a0d9eSJeff Layton 	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
8470f0257eaSTom Tucker 
8480f0257eaSTom Tucker 	if (serv->sv_stats)
8490f0257eaSTom Tucker 		serv->sv_stats->netcnt++;
850860a0d9eSJeff Layton 	trace_svc_recv(rqstp, len);
8510f0257eaSTom Tucker 	return len;
852860a0d9eSJeff Layton out_release:
853ca7896cdSJ. Bruce Fields 	rqstp->rq_res.len = 0;
854ca7896cdSJ. Bruce Fields 	svc_xprt_release(rqstp);
855860a0d9eSJeff Layton out:
856860a0d9eSJeff Layton 	return err;
8570f0257eaSTom Tucker }
85824c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(svc_recv);
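/*
 * Illustrative sketch (editorial, not part of the original file): a
 * typical service thread alternates svc_recv() with request processing,
 * roughly as in the hypothetical loop below; real callers such as nfsd
 * add their own signal and shutdown handling, and the timeout value is
 * only an example:
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, 60 * 60 * HZ);
 *		if (err == -EINTR)
 *			break;
 *		if (err == -EAGAIN)
 *			continue;
 *		svc_process(rqstp);
 *	}
 */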
8590f0257eaSTom Tucker 
8600f0257eaSTom Tucker /*
8610f0257eaSTom Tucker  * Drop request
8620f0257eaSTom Tucker  */
8630f0257eaSTom Tucker void svc_drop(struct svc_rqst *rqstp)
8640f0257eaSTom Tucker {
865104f6351STrond Myklebust 	trace_svc_drop(rqstp);
8660f0257eaSTom Tucker 	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
8670f0257eaSTom Tucker 	svc_xprt_release(rqstp);
8680f0257eaSTom Tucker }
86924c3767eSTrond Myklebust EXPORT_SYMBOL_GPL(svc_drop);
8700f0257eaSTom Tucker 
8710f0257eaSTom Tucker /*
8720f0257eaSTom Tucker  * Return reply to client.
8730f0257eaSTom Tucker  */
8740f0257eaSTom Tucker int svc_send(struct svc_rqst *rqstp)
8750f0257eaSTom Tucker {
8760f0257eaSTom Tucker 	struct svc_xprt	*xprt;
877860a0d9eSJeff Layton 	int		len = -EFAULT;
8780f0257eaSTom Tucker 	struct xdr_buf	*xb;
8790f0257eaSTom Tucker 
8800f0257eaSTom Tucker 	xprt = rqstp->rq_xprt;
8810f0257eaSTom Tucker 	if (!xprt)
882860a0d9eSJeff Layton 		goto out;
8830f0257eaSTom Tucker 
8840f0257eaSTom Tucker 	/* release the receive skb before sending the reply */
88563a1b156SChuck Lever 	xprt->xpt_ops->xpo_release_rqst(rqstp);
8860f0257eaSTom Tucker 
8870f0257eaSTom Tucker 	/* calculate over-all length */
8880f0257eaSTom Tucker 	xb = &rqstp->rq_res;
8890f0257eaSTom Tucker 	xb->len = xb->head[0].iov_len +
8900f0257eaSTom Tucker 		xb->page_len +
8910f0257eaSTom Tucker 		xb->tail[0].iov_len;
8920f0257eaSTom Tucker 
8930f0257eaSTom Tucker 	/* Grab mutex to serialize outgoing data. */
8940f0257eaSTom Tucker 	mutex_lock(&xprt->xpt_mutex);
895aaba72cdSChuck Lever 	trace_svc_stats_latency(rqstp);
896f06f00a2SJ. Bruce Fields 	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
897f06f00a2SJ. Bruce Fields 			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
8980f0257eaSTom Tucker 		len = -ENOTCONN;
8990f0257eaSTom Tucker 	else
9000f0257eaSTom Tucker 		len = xprt->xpt_ops->xpo_sendto(rqstp);
9010f0257eaSTom Tucker 	mutex_unlock(&xprt->xpt_mutex);
902ece200ddSChuck Lever 	trace_svc_send(rqstp, len);
9030f0257eaSTom Tucker 	svc_xprt_release(rqstp);
9040f0257eaSTom Tucker 
9050f0257eaSTom Tucker 	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
906860a0d9eSJeff Layton 		len = 0;
907860a0d9eSJeff Layton out:
9080f0257eaSTom Tucker 	return len;
9090f0257eaSTom Tucker }
9100f0257eaSTom Tucker 
9110f0257eaSTom Tucker /*
9120f0257eaSTom Tucker  * Timer function to close old temporary transports, using
9130f0257eaSTom Tucker  * a mark-and-sweep algorithm.
9140f0257eaSTom Tucker  */
915ff861c4dSKees Cook static void svc_age_temp_xprts(struct timer_list *t)
9160f0257eaSTom Tucker {
917ff861c4dSKees Cook 	struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
9180f0257eaSTom Tucker 	struct svc_xprt *xprt;
9190f0257eaSTom Tucker 	struct list_head *le, *next;
9200f0257eaSTom Tucker 
9210f0257eaSTom Tucker 	dprintk("svc_age_temp_xprts\n");
9220f0257eaSTom Tucker 
9230f0257eaSTom Tucker 	if (!spin_trylock_bh(&serv->sv_lock)) {
9240f0257eaSTom Tucker 		/* busy, try again 1 sec later */
9250f0257eaSTom Tucker 		dprintk("svc_age_temp_xprts: busy\n");
9260f0257eaSTom Tucker 		mod_timer(&serv->sv_temptimer, jiffies + HZ);
9270f0257eaSTom Tucker 		return;
9280f0257eaSTom Tucker 	}
9290f0257eaSTom Tucker 
9300f0257eaSTom Tucker 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
9310f0257eaSTom Tucker 		xprt = list_entry(le, struct svc_xprt, xpt_list);
9320f0257eaSTom Tucker 
9330f0257eaSTom Tucker 		/* First time through, just mark it OLD. Second time
9340f0257eaSTom Tucker 		 * through, close it. */
9350f0257eaSTom Tucker 		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
9360f0257eaSTom Tucker 			continue;
9372c935bc5SPeter Zijlstra 		if (kref_read(&xprt->xpt_ref) > 1 ||
938f64f9e71SJoe Perches 		    test_bit(XPT_BUSY, &xprt->xpt_flags))
9390f0257eaSTom Tucker 			continue;
940e75bafbfSJ. Bruce Fields 		list_del_init(le);
9410f0257eaSTom Tucker 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
9420f0257eaSTom Tucker 		dprintk("queuing xprt %p for closing\n", xprt);
9430f0257eaSTom Tucker 
9440f0257eaSTom Tucker 		/* a thread will dequeue and close it soon */
9450f0257eaSTom Tucker 		svc_xprt_enqueue(xprt);
9460f0257eaSTom Tucker 	}
947e75bafbfSJ. Bruce Fields 	spin_unlock_bh(&serv->sv_lock);
9480f0257eaSTom Tucker 
9490f0257eaSTom Tucker 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
9500f0257eaSTom Tucker }
9510f0257eaSTom Tucker 
952c3d4879eSScott Mayhew /* Close temporary transports whose xpt_local matches server_addr immediately
953c3d4879eSScott Mayhew  * instead of waiting for them to be picked up by the timer.
954c3d4879eSScott Mayhew  *
955c3d4879eSScott Mayhew  * This is meant to be called from a notifier_block that runs when an ip
956c3d4879eSScott Mayhew  * address is deleted.
957c3d4879eSScott Mayhew  */
958c3d4879eSScott Mayhew void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
959c3d4879eSScott Mayhew {
960c3d4879eSScott Mayhew 	struct svc_xprt *xprt;
961c3d4879eSScott Mayhew 	struct list_head *le, *next;
962c3d4879eSScott Mayhew 	LIST_HEAD(to_be_closed);
963c3d4879eSScott Mayhew 
964c3d4879eSScott Mayhew 	spin_lock_bh(&serv->sv_lock);
965c3d4879eSScott Mayhew 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
966c3d4879eSScott Mayhew 		xprt = list_entry(le, struct svc_xprt, xpt_list);
967c3d4879eSScott Mayhew 		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
968c3d4879eSScott Mayhew 				&xprt->xpt_local)) {
969c3d4879eSScott Mayhew 			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
970c3d4879eSScott Mayhew 			list_move(le, &to_be_closed);
971c3d4879eSScott Mayhew 		}
972c3d4879eSScott Mayhew 	}
973c3d4879eSScott Mayhew 	spin_unlock_bh(&serv->sv_lock);
974c3d4879eSScott Mayhew 
975c3d4879eSScott Mayhew 	while (!list_empty(&to_be_closed)) {
976c3d4879eSScott Mayhew 		le = to_be_closed.next;
977c3d4879eSScott Mayhew 		list_del_init(le);
978c3d4879eSScott Mayhew 		xprt = list_entry(le, struct svc_xprt, xpt_list);
979546125d1SScott Mayhew 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
980546125d1SScott Mayhew 		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
981546125d1SScott Mayhew 		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
982546125d1SScott Mayhew 				xprt);
983546125d1SScott Mayhew 		svc_xprt_enqueue(xprt);
984c3d4879eSScott Mayhew 	}
985c3d4879eSScott Mayhew }
986c3d4879eSScott Mayhew EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
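/*
 * Illustrative sketch (editorial, not part of the original file): a
 * caller would typically invoke this from an inetaddr notifier so that
 * connections bound to a just-removed address are torn down promptly.
 * All "foo" names are hypothetical:
 *
 *	static int foo_inetaddr_event(struct notifier_block *this,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct in_ifaddr *ifa = ptr;
 *		struct sockaddr_in sin;
 *
 *		if (event != NETDEV_DOWN)
 *			return NOTIFY_DONE;
 *		sin.sin_family = AF_INET;
 *		sin.sin_addr.s_addr = ifa->ifa_local;
 *		svc_age_temp_xprts_now(foo_serv, (struct sockaddr *)&sin);
 *		return NOTIFY_DONE;
 *	}
 */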
987c3d4879eSScott Mayhew 
988edc7a894SJ. Bruce Fields static void call_xpt_users(struct svc_xprt *xprt)
989edc7a894SJ. Bruce Fields {
990edc7a894SJ. Bruce Fields 	struct svc_xpt_user *u;
991edc7a894SJ. Bruce Fields 
992edc7a894SJ. Bruce Fields 	spin_lock(&xprt->xpt_lock);
993edc7a894SJ. Bruce Fields 	while (!list_empty(&xprt->xpt_users)) {
994edc7a894SJ. Bruce Fields 		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
995bb6ad557STrond Myklebust 		list_del_init(&u->list);
996edc7a894SJ. Bruce Fields 		u->callback(u);
997edc7a894SJ. Bruce Fields 	}
998edc7a894SJ. Bruce Fields 	spin_unlock(&xprt->xpt_lock);
999edc7a894SJ. Bruce Fields }
1000edc7a894SJ. Bruce Fields 
10010f0257eaSTom Tucker /*
10020f0257eaSTom Tucker  * Remove a dead transport
10030f0257eaSTom Tucker  */
10047710ec36SJ. Bruce Fields static void svc_delete_xprt(struct svc_xprt *xprt)
10050f0257eaSTom Tucker {
10060f0257eaSTom Tucker 	struct svc_serv	*serv = xprt->xpt_server;
100722945e4aSTom Tucker 	struct svc_deferred_req *dr;
100822945e4aSTom Tucker 
100922945e4aSTom Tucker 	/* Only do this once */
101022945e4aSTom Tucker 	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
1011ac9303ebSJ. Bruce Fields 		BUG();
10120f0257eaSTom Tucker 
10130f0257eaSTom Tucker 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
10140f0257eaSTom Tucker 	xprt->xpt_ops->xpo_detach(xprt);
10150f0257eaSTom Tucker 
10160f0257eaSTom Tucker 	spin_lock_bh(&serv->sv_lock);
10170f0257eaSTom Tucker 	list_del_init(&xprt->xpt_list);
101801047298SWeston Andros Adamson 	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
10190f0257eaSTom Tucker 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
10200f0257eaSTom Tucker 		serv->sv_tmpcnt--;
1021788e69e5SJ. Bruce Fields 	spin_unlock_bh(&serv->sv_lock);
102222945e4aSTom Tucker 
1023ab1b18f7SNeil Brown 	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
102422945e4aSTom Tucker 		kfree(dr);
102522945e4aSTom Tucker 
1026edc7a894SJ. Bruce Fields 	call_xpt_users(xprt);
102722945e4aSTom Tucker 	svc_xprt_put(xprt);
10280f0257eaSTom Tucker }
10290f0257eaSTom Tucker 
10300f0257eaSTom Tucker void svc_close_xprt(struct svc_xprt *xprt)
10310f0257eaSTom Tucker {
10320f0257eaSTom Tucker 	set_bit(XPT_CLOSE, &xprt->xpt_flags);
10330f0257eaSTom Tucker 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
10340f0257eaSTom Tucker 		/* someone else will have to effect the close */
10350f0257eaSTom Tucker 		return;
1036b1763316SJ. Bruce Fields 	/*
1037b1763316SJ. Bruce Fields 	 * We expect svc_close_xprt() to work even when no threads are
1038b1763316SJ. Bruce Fields 	 * running (e.g., while configuring the server before starting
1039b1763316SJ. Bruce Fields 	 * any threads), so if the transport isn't busy, we delete
1040b1763316SJ. Bruce Fields 	 * it ourself:
1041b1763316SJ. Bruce Fields 	 * it ourselves:
10420f0257eaSTom Tucker 	svc_delete_xprt(xprt);
10430f0257eaSTom Tucker }
1044a217813fSTom Tucker EXPORT_SYMBOL_GPL(svc_close_xprt);
10450f0257eaSTom Tucker 
1046cc630d9fSJ. Bruce Fields static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
10470f0257eaSTom Tucker {
10480f0257eaSTom Tucker 	struct svc_xprt *xprt;
1049cc630d9fSJ. Bruce Fields 	int ret = 0;
10500f0257eaSTom Tucker 
1051719f8bccSJ. Bruce Fields 	spin_lock(&serv->sv_lock);
1052b4f36f88SJ. Bruce Fields 	list_for_each_entry(xprt, xprt_list, xpt_list) {
10537b147f1fSStanislav Kinsbursky 		if (xprt->xpt_net != net)
10547b147f1fSStanislav Kinsbursky 			continue;
1055cc630d9fSJ. Bruce Fields 		ret++;
10560f0257eaSTom Tucker 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
1057cc630d9fSJ. Bruce Fields 		svc_xprt_enqueue(xprt);
10580f0257eaSTom Tucker 	}
1059719f8bccSJ. Bruce Fields 	spin_unlock(&serv->sv_lock);
1060cc630d9fSJ. Bruce Fields 	return ret;
10610f0257eaSTom Tucker }
10620f0257eaSTom Tucker 
1063cc630d9fSJ. Bruce Fields static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
10642fefb8a0SJ. Bruce Fields {
1065b4f36f88SJ. Bruce Fields 	struct svc_pool *pool;
1066b4f36f88SJ. Bruce Fields 	struct svc_xprt *xprt;
1067b4f36f88SJ. Bruce Fields 	struct svc_xprt *tmp;
1068b4f36f88SJ. Bruce Fields 	int i;
1069b4f36f88SJ. Bruce Fields 
1070b4f36f88SJ. Bruce Fields 	for (i = 0; i < serv->sv_nrpools; i++) {
1071b4f36f88SJ. Bruce Fields 		pool = &serv->sv_pools[i];
1072b4f36f88SJ. Bruce Fields 
1073b4f36f88SJ. Bruce Fields 		spin_lock_bh(&pool->sp_lock);
10746f513365SStanislav Kinsbursky 		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
10757b147f1fSStanislav Kinsbursky 			if (xprt->xpt_net != net)
10767b147f1fSStanislav Kinsbursky 				continue;
1077b4f36f88SJ. Bruce Fields 			list_del_init(&xprt->xpt_ready);
1078cc630d9fSJ. Bruce Fields 			spin_unlock_bh(&pool->sp_lock);
1079cc630d9fSJ. Bruce Fields 			return xprt;
1080b4f36f88SJ. Bruce Fields 		}
1081b4f36f88SJ. Bruce Fields 		spin_unlock_bh(&pool->sp_lock);
1082b4f36f88SJ. Bruce Fields 	}
1083cc630d9fSJ. Bruce Fields 	return NULL;
10846f513365SStanislav Kinsbursky }
10856f513365SStanislav Kinsbursky 
1086cc630d9fSJ. Bruce Fields static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
10876f513365SStanislav Kinsbursky {
10886f513365SStanislav Kinsbursky 	struct svc_xprt *xprt;
10896f513365SStanislav Kinsbursky 
1090cc630d9fSJ. Bruce Fields 	while ((xprt = svc_dequeue_net(serv, net))) {
1091cc630d9fSJ. Bruce Fields 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
1092719f8bccSJ. Bruce Fields 		svc_delete_xprt(xprt);
10933a22bf50SStanislav Kinsbursky 	}
1094cc630d9fSJ. Bruce Fields }
10953a22bf50SStanislav Kinsbursky 
1096cc630d9fSJ. Bruce Fields /*
1097cc630d9fSJ. Bruce Fields  * Server threads may still be running (especially in the case where the
1098cc630d9fSJ. Bruce Fields  * service is still running in other network namespaces).
1099cc630d9fSJ. Bruce Fields  *
1100cc630d9fSJ. Bruce Fields  * So we shut down sockets the same way we would on a running server, by
1101cc630d9fSJ. Bruce Fields  * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
1102cc630d9fSJ. Bruce Fields  * the close.  In the case where there are no such other threads
1103cc630d9fSJ. Bruce Fields  * running, svc_clean_up_xprts() does a simple version of a
1104cc630d9fSJ. Bruce Fields  * server's main event loop, and in the case where there are other
1105cc630d9fSJ. Bruce Fields  * threads, we may need to wait a little while and then check again to
1106cc630d9fSJ. Bruce Fields  * see if they're done.
1107cc630d9fSJ. Bruce Fields  */
11087b147f1fSStanislav Kinsbursky void svc_close_net(struct svc_serv *serv, struct net *net)
11093a22bf50SStanislav Kinsbursky {
1110cc630d9fSJ. Bruce Fields 	int delay = 0;
11116f513365SStanislav Kinsbursky 
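	/*
	 * Each pass marks and re-enqueues whatever is still on the lists,
	 * reaps what it can itself via svc_clean_up_xprts(), and then
	 * sleeps a little longer (msleep(delay++)) to give other server
	 * threads time to finish closing transports they already dequeued.
	 */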
1112cc630d9fSJ. Bruce Fields 	while (svc_close_list(serv, &serv->sv_permsocks, net) +
1113cc630d9fSJ. Bruce Fields 	       svc_close_list(serv, &serv->sv_tempsocks, net)) {
1114cc630d9fSJ. Bruce Fields 
1115cc630d9fSJ. Bruce Fields 		svc_clean_up_xprts(serv, net);
1116cc630d9fSJ. Bruce Fields 		msleep(delay++);
1117cc630d9fSJ. Bruce Fields 	}
11182fefb8a0SJ. Bruce Fields }
11192fefb8a0SJ. Bruce Fields 
11200f0257eaSTom Tucker /*
11210f0257eaSTom Tucker  * Handle defer and revisit of requests
11220f0257eaSTom Tucker  */
11230f0257eaSTom Tucker 
11240f0257eaSTom Tucker static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
11250f0257eaSTom Tucker {
11260f0257eaSTom Tucker 	struct svc_deferred_req *dr =
11270f0257eaSTom Tucker 		container_of(dreq, struct svc_deferred_req, handle);
11280f0257eaSTom Tucker 	struct svc_xprt *xprt = dr->xprt;
11290f0257eaSTom Tucker 
113022945e4aSTom Tucker 	spin_lock(&xprt->xpt_lock);
113122945e4aSTom Tucker 	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
113222945e4aSTom Tucker 	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
113322945e4aSTom Tucker 		spin_unlock(&xprt->xpt_lock);
113422945e4aSTom Tucker 		dprintk("revisit canceled\n");
11350f0257eaSTom Tucker 		svc_xprt_put(xprt);
1136104f6351STrond Myklebust 		trace_svc_drop_deferred(dr);
11370f0257eaSTom Tucker 		kfree(dr);
11380f0257eaSTom Tucker 		return;
11390f0257eaSTom Tucker 	}
11400f0257eaSTom Tucker 	dprintk("revisit queued\n");
11410f0257eaSTom Tucker 	dr->xprt = NULL;
11420f0257eaSTom Tucker 	list_add(&dr->handle.recent, &xprt->xpt_deferred);
11430f0257eaSTom Tucker 	spin_unlock(&xprt->xpt_lock);
11440f0257eaSTom Tucker 	svc_xprt_enqueue(xprt);
11450f0257eaSTom Tucker 	svc_xprt_put(xprt);
11460f0257eaSTom Tucker }
11470f0257eaSTom Tucker 
1148260c1d12STom Tucker /*
1149260c1d12STom Tucker  * Save the request off for later processing. The request buffer looks
1150260c1d12STom Tucker  * like this:
1151260c1d12STom Tucker  *
1152260c1d12STom Tucker  * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
1153260c1d12STom Tucker  *
1154260c1d12STom Tucker  * This code can only handle requests that consist of an xprt-header
1155260c1d12STom Tucker  * and rpc-header.
1156260c1d12STom Tucker  */
11570f0257eaSTom Tucker static struct cache_deferred_req *svc_defer(struct cache_req *req)
11580f0257eaSTom Tucker {
11590f0257eaSTom Tucker 	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
11600f0257eaSTom Tucker 	struct svc_deferred_req *dr;
11610f0257eaSTom Tucker 
116230660e04SJeff Layton 	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
11630f0257eaSTom Tucker 		return NULL; /* if more than a page, give up FIXME */
11640f0257eaSTom Tucker 	if (rqstp->rq_deferred) {
11650f0257eaSTom Tucker 		dr = rqstp->rq_deferred;
11660f0257eaSTom Tucker 		rqstp->rq_deferred = NULL;
11670f0257eaSTom Tucker 	} else {
1168260c1d12STom Tucker 		size_t skip;
1169260c1d12STom Tucker 		size_t size;
11700f0257eaSTom Tucker 		/* FIXME maybe discard if size too large */
1171260c1d12STom Tucker 		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
11720f0257eaSTom Tucker 		dr = kmalloc(size, GFP_KERNEL);
11730f0257eaSTom Tucker 		if (dr == NULL)
11740f0257eaSTom Tucker 			return NULL;
11750f0257eaSTom Tucker 
11760f0257eaSTom Tucker 		dr->handle.owner = rqstp->rq_server;
11770f0257eaSTom Tucker 		dr->prot = rqstp->rq_prot;
11780f0257eaSTom Tucker 		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
11790f0257eaSTom Tucker 		dr->addrlen = rqstp->rq_addrlen;
11800f0257eaSTom Tucker 		dr->daddr = rqstp->rq_daddr;
11810f0257eaSTom Tucker 		dr->argslen = rqstp->rq_arg.len >> 2;
1182260c1d12STom Tucker 		dr->xprt_hlen = rqstp->rq_xprt_hlen;
1183260c1d12STom Tucker 
1184260c1d12STom Tucker 		/* back up head to the start of the buffer and copy */
1185260c1d12STom Tucker 		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
11860f0257eaSTom Tucker 		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
11870f0257eaSTom Tucker 		       dr->argslen << 2);
11880f0257eaSTom Tucker 	}
11890f0257eaSTom Tucker 	svc_xprt_get(rqstp->rq_xprt);
11900f0257eaSTom Tucker 	dr->xprt = rqstp->rq_xprt;
119178b65eb3SJeff Layton 	set_bit(RQ_DROPME, &rqstp->rq_flags);
11920f0257eaSTom Tucker 
11930f0257eaSTom Tucker 	dr->handle.revisit = svc_revisit;
1194104f6351STrond Myklebust 	trace_svc_defer(rqstp);
11950f0257eaSTom Tucker 	return &dr->handle;
11960f0257eaSTom Tucker }
11970f0257eaSTom Tucker 
11980f0257eaSTom Tucker /*
11990f0257eaSTom Tucker  * recv data from a deferred request into an active one
12000f0257eaSTom Tucker  */
12010f0257eaSTom Tucker static int svc_deferred_recv(struct svc_rqst *rqstp)
12020f0257eaSTom Tucker {
12030f0257eaSTom Tucker 	struct svc_deferred_req *dr = rqstp->rq_deferred;
12040f0257eaSTom Tucker 
1205260c1d12STom Tucker 	/* setup iov_base past transport header */
1206260c1d12STom Tucker 	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
1207260c1d12STom Tucker 	/* The iov_len does not include the transport header bytes */
1208260c1d12STom Tucker 	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
12090f0257eaSTom Tucker 	rqstp->rq_arg.page_len = 0;
1210260c1d12STom Tucker 	/* The rq_arg.len includes the transport header bytes */
12110f0257eaSTom Tucker 	rqstp->rq_arg.len     = dr->argslen<<2;
12120f0257eaSTom Tucker 	rqstp->rq_prot        = dr->prot;
12130f0257eaSTom Tucker 	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
12140f0257eaSTom Tucker 	rqstp->rq_addrlen     = dr->addrlen;
1215260c1d12STom Tucker 	/* Save off transport header len in case we get deferred again */
1216260c1d12STom Tucker 	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
12170f0257eaSTom Tucker 	rqstp->rq_daddr       = dr->daddr;
12180f0257eaSTom Tucker 	rqstp->rq_respages    = rqstp->rq_pages;
1219260c1d12STom Tucker 	return (dr->argslen<<2) - dr->xprt_hlen;
12200f0257eaSTom Tucker }
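
/*
 * Worked example (illustrative numbers only, not taken from a real trace):
 * suppose a deferred request had rq_xprt_hlen = 4 and rq_arg.len = 100,
 * i.e. a 4-byte transport header followed by 96 bytes of RPC message.
 * svc_defer() stores argslen = 100 >> 2 = 25 words, and svc_deferred_recv()
 * later rebuilds head[0] with iov_base advanced past the transport header,
 * iov_len = (25 << 2) - 4 = 96, and returns 96, so the RPC layer sees only
 * the RPC message and not the transport header.
 */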
12210f0257eaSTom Tucker 
12220f0257eaSTom Tucker 
12230f0257eaSTom Tucker static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
12240f0257eaSTom Tucker {
12250f0257eaSTom Tucker 	struct svc_deferred_req *dr = NULL;
12260f0257eaSTom Tucker 
12270f0257eaSTom Tucker 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
12280f0257eaSTom Tucker 		return NULL;
12290f0257eaSTom Tucker 	spin_lock(&xprt->xpt_lock);
12300f0257eaSTom Tucker 	if (!list_empty(&xprt->xpt_deferred)) {
12310f0257eaSTom Tucker 		dr = list_entry(xprt->xpt_deferred.next,
12320f0257eaSTom Tucker 				struct svc_deferred_req,
12330f0257eaSTom Tucker 				handle.recent);
12340f0257eaSTom Tucker 		list_del_init(&dr->handle.recent);
1235104f6351STrond Myklebust 		trace_svc_revisit_deferred(dr);
123662bac4afSJ. Bruce Fields 	} else
123762bac4afSJ. Bruce Fields 		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
12380f0257eaSTom Tucker 	spin_unlock(&xprt->xpt_lock);
12390f0257eaSTom Tucker 	return dr;
12400f0257eaSTom Tucker }
12417fcb98d5STom Tucker 
1242156e6209SChuck Lever /**
1243156e6209SChuck Lever  * svc_find_xprt - find an RPC transport instance
1244156e6209SChuck Lever  * @serv: pointer to svc_serv to search
1245156e6209SChuck Lever  * @xcl_name: C string containing transport's class name
12464cb54ca2SStanislav Kinsbursky  * @net: owner net pointer
1247156e6209SChuck Lever  * @af: Address family of transport's local address
1248156e6209SChuck Lever  * @port: transport's IP port number
1249156e6209SChuck Lever  *
12507fcb98d5STom Tucker  * Return the transport instance pointer for the endpoint accepting
12517fcb98d5STom Tucker  * connections/peer traffic from the specified transport class,
12527fcb98d5STom Tucker  * address family and port.
12537fcb98d5STom Tucker  *
12547fcb98d5STom Tucker  * Specifying 0 for the address family or port is effectively a
12557fcb98d5STom Tucker  * wild-card, and will result in matching the first transport in the
12567fcb98d5STom Tucker  * service's list that has a matching class name.
12577fcb98d5STom Tucker  */
1258156e6209SChuck Lever struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
12594cb54ca2SStanislav Kinsbursky 			       struct net *net, const sa_family_t af,
12604cb54ca2SStanislav Kinsbursky 			       const unsigned short port)
12617fcb98d5STom Tucker {
12627fcb98d5STom Tucker 	struct svc_xprt *xprt;
12637fcb98d5STom Tucker 	struct svc_xprt *found = NULL;
12647fcb98d5STom Tucker 
12657fcb98d5STom Tucker 	/* Sanity check the args */
1266156e6209SChuck Lever 	if (serv == NULL || xcl_name == NULL)
12677fcb98d5STom Tucker 		return found;
12687fcb98d5STom Tucker 
12697fcb98d5STom Tucker 	spin_lock_bh(&serv->sv_lock);
12707fcb98d5STom Tucker 	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
12714cb54ca2SStanislav Kinsbursky 		if (xprt->xpt_net != net)
12724cb54ca2SStanislav Kinsbursky 			continue;
12737fcb98d5STom Tucker 		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
12747fcb98d5STom Tucker 			continue;
12757fcb98d5STom Tucker 		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
12767fcb98d5STom Tucker 			continue;
1277156e6209SChuck Lever 		if (port != 0 && port != svc_xprt_local_port(xprt))
12787fcb98d5STom Tucker 			continue;
12797fcb98d5STom Tucker 		found = xprt;
1280a217813fSTom Tucker 		svc_xprt_get(xprt);
12817fcb98d5STom Tucker 		break;
12827fcb98d5STom Tucker 	}
12837fcb98d5STom Tucker 	spin_unlock_bh(&serv->sv_lock);
12847fcb98d5STom Tucker 	return found;
12857fcb98d5STom Tucker }
12867fcb98d5STom Tucker EXPORT_SYMBOL_GPL(svc_find_xprt);
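
/*
 * Example (a hedged sketch, not code from this file): a caller that wants
 * the first "tcp" listener for a given net namespace, regardless of address
 * family or port, might do roughly the following, assuming "serv" and "net"
 * are the caller's svc_serv and net namespace.  svc_find_xprt() takes a
 * reference on the transport it returns, so the caller must drop it with
 * svc_xprt_put() when done:
 *
 *	struct svc_xprt *xprt;
 *	unsigned short port = 0;
 *
 *	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
 *	if (xprt) {
 *		port = svc_xprt_local_port(xprt);
 *		svc_xprt_put(xprt);
 *	}
 */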
12879571af18STom Tucker 
1288335c54bdSChuck Lever static int svc_one_xprt_name(const struct svc_xprt *xprt,
1289335c54bdSChuck Lever 			     char *pos, int remaining)
1290335c54bdSChuck Lever {
1291335c54bdSChuck Lever 	int len;
1292335c54bdSChuck Lever 
1293335c54bdSChuck Lever 	len = snprintf(pos, remaining, "%s %u\n",
1294335c54bdSChuck Lever 			xprt->xpt_class->xcl_name,
1295335c54bdSChuck Lever 			svc_xprt_local_port(xprt));
1296335c54bdSChuck Lever 	if (len >= remaining)
1297335c54bdSChuck Lever 		return -ENAMETOOLONG;
1298335c54bdSChuck Lever 	return len;
1299335c54bdSChuck Lever }
1300335c54bdSChuck Lever 
1301335c54bdSChuck Lever /**
1302335c54bdSChuck Lever  * svc_xprt_names - format a buffer with a list of transport names
1303335c54bdSChuck Lever  * @serv: pointer to an RPC service
1304335c54bdSChuck Lever  * @buf: pointer to a buffer to be filled in
1305335c54bdSChuck Lever  * @buflen: length of buffer to be filled in
1306335c54bdSChuck Lever  *
1307335c54bdSChuck Lever  * Fills in @buf with a string containing a list of transport names,
1308335c54bdSChuck Lever  * each name terminated with '\n'.
1309335c54bdSChuck Lever  *
1310335c54bdSChuck Lever  * Returns positive length of the filled-in string on success; otherwise
1311335c54bdSChuck Lever  * a negative errno value is returned if an error occurs.
13129571af18STom Tucker  */
1313335c54bdSChuck Lever int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
13149571af18STom Tucker {
13159571af18STom Tucker 	struct svc_xprt *xprt;
1316335c54bdSChuck Lever 	int len, totlen;
1317335c54bdSChuck Lever 	char *pos;
13189571af18STom Tucker 
13199571af18STom Tucker 	/* Sanity check args */
13209571af18STom Tucker 	if (!serv)
13219571af18STom Tucker 		return 0;
13229571af18STom Tucker 
13239571af18STom Tucker 	spin_lock_bh(&serv->sv_lock);
1324335c54bdSChuck Lever 
1325335c54bdSChuck Lever 	pos = buf;
1326335c54bdSChuck Lever 	totlen = 0;
13279571af18STom Tucker 	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1328335c54bdSChuck Lever 		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
1329335c54bdSChuck Lever 		if (len < 0) {
1330335c54bdSChuck Lever 			*buf = '\0';
1331335c54bdSChuck Lever 			totlen = len;
1332335c54bdSChuck Lever 		}
1333335c54bdSChuck Lever 		if (len <= 0)
13349571af18STom Tucker 			break;
1335335c54bdSChuck Lever 
1336335c54bdSChuck Lever 		pos += len;
13379571af18STom Tucker 		totlen += len;
13389571af18STom Tucker 	}
1339335c54bdSChuck Lever 
13409571af18STom Tucker 	spin_unlock_bh(&serv->sv_lock);
13419571af18STom Tucker 	return totlen;
13429571af18STom Tucker }
13439571af18STom Tucker EXPORT_SYMBOL_GPL(svc_xprt_names);
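
/*
 * Example (a hedged sketch): the list produced above is typically read into
 * a caller-supplied buffer and then copied to user space or logged.
 * Assuming "serv" is a valid svc_serv, a caller might do:
 *
 *	char buf[128];
 *	int len;
 *
 *	len = svc_xprt_names(serv, buf, sizeof(buf));
 *	if (len > 0)
 *		pr_info("transports:\n%s", buf);
 *
 * A negative return (-ENAMETOOLONG) means the buffer was too small to hold
 * every "<class> <port>\n" entry.
 */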
134403cf6c9fSGreg Banks 
134503cf6c9fSGreg Banks 
134603cf6c9fSGreg Banks /*----------------------------------------------------------------------------*/
134703cf6c9fSGreg Banks 
134803cf6c9fSGreg Banks static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
134903cf6c9fSGreg Banks {
135003cf6c9fSGreg Banks 	unsigned int pidx = (unsigned int)*pos;
135103cf6c9fSGreg Banks 	struct svc_serv *serv = m->private;
135203cf6c9fSGreg Banks 
135303cf6c9fSGreg Banks 	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
135403cf6c9fSGreg Banks 
135503cf6c9fSGreg Banks 	if (!pidx)
135603cf6c9fSGreg Banks 		return SEQ_START_TOKEN;
135703cf6c9fSGreg Banks 	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
135803cf6c9fSGreg Banks }
135903cf6c9fSGreg Banks 
136003cf6c9fSGreg Banks static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
136103cf6c9fSGreg Banks {
136203cf6c9fSGreg Banks 	struct svc_pool *pool = p;
136303cf6c9fSGreg Banks 	struct svc_serv *serv = m->private;
136403cf6c9fSGreg Banks 
136503cf6c9fSGreg Banks 	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
136603cf6c9fSGreg Banks 
136703cf6c9fSGreg Banks 	if (p == SEQ_START_TOKEN) {
136803cf6c9fSGreg Banks 		pool = &serv->sv_pools[0];
136903cf6c9fSGreg Banks 	} else {
137003cf6c9fSGreg Banks 		unsigned int pidx = (pool - &serv->sv_pools[0]);
137103cf6c9fSGreg Banks 		if (pidx < serv->sv_nrpools-1)
137203cf6c9fSGreg Banks 			pool = &serv->sv_pools[pidx+1];
137303cf6c9fSGreg Banks 		else
137403cf6c9fSGreg Banks 			pool = NULL;
137503cf6c9fSGreg Banks 	}
137603cf6c9fSGreg Banks 	++*pos;
137703cf6c9fSGreg Banks 	return pool;
137803cf6c9fSGreg Banks }
137903cf6c9fSGreg Banks 
138003cf6c9fSGreg Banks static void svc_pool_stats_stop(struct seq_file *m, void *p)
138103cf6c9fSGreg Banks {
138203cf6c9fSGreg Banks }
138303cf6c9fSGreg Banks 
138403cf6c9fSGreg Banks static int svc_pool_stats_show(struct seq_file *m, void *p)
138503cf6c9fSGreg Banks {
138603cf6c9fSGreg Banks 	struct svc_pool *pool = p;
138703cf6c9fSGreg Banks 
138803cf6c9fSGreg Banks 	if (p == SEQ_START_TOKEN) {
138978c210efSJ. Bruce Fields 		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
139003cf6c9fSGreg Banks 		return 0;
139103cf6c9fSGreg Banks 	}
139203cf6c9fSGreg Banks 
139378c210efSJ. Bruce Fields 	seq_printf(m, "%u %lu %lu %lu %lu\n",
139403cf6c9fSGreg Banks 		pool->sp_id,
1395403c7b44SJeff Layton 		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
139603cf6c9fSGreg Banks 		pool->sp_stats.sockets_queued,
1397403c7b44SJeff Layton 		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
1398403c7b44SJeff Layton 		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
139903cf6c9fSGreg Banks 
140003cf6c9fSGreg Banks 	return 0;
140103cf6c9fSGreg Banks }
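
/*
 * Shape of the resulting seq_file output (one row per pool, in sp_id order,
 * following the header emitted for the SEQ_START_TOKEN entry above):
 *
 *	# pool packets-arrived sockets-enqueued threads-woken threads-timedout
 *	0 <packets> <sockets-queued> <woken> <timedout>
 *	1 <packets> <sockets-queued> <woken> <timedout>
 */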
140203cf6c9fSGreg Banks 
140303cf6c9fSGreg Banks static const struct seq_operations svc_pool_stats_seq_ops = {
140403cf6c9fSGreg Banks 	.start	= svc_pool_stats_start,
140503cf6c9fSGreg Banks 	.next	= svc_pool_stats_next,
140603cf6c9fSGreg Banks 	.stop	= svc_pool_stats_stop,
140703cf6c9fSGreg Banks 	.show	= svc_pool_stats_show,
140803cf6c9fSGreg Banks };
140903cf6c9fSGreg Banks 
141003cf6c9fSGreg Banks int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
141103cf6c9fSGreg Banks {
141203cf6c9fSGreg Banks 	int err;
141303cf6c9fSGreg Banks 
141403cf6c9fSGreg Banks 	err = seq_open(file, &svc_pool_stats_seq_ops);
141503cf6c9fSGreg Banks 	if (!err)
141603cf6c9fSGreg Banks 		((struct seq_file *) file->private_data)->private = serv;
141703cf6c9fSGreg Banks 	return err;
141803cf6c9fSGreg Banks }
141903cf6c9fSGreg Banks EXPORT_SYMBOL(svc_pool_stats_open);
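
/*
 * Example (a hedged sketch): a filesystem exposing these stats would wire
 * svc_pool_stats_open() into its own open method and pair it with the usual
 * seq_file helpers; "example_serv" below stands in for however the caller
 * looks up its svc_serv:
 *
 *	static int example_pool_stats_open(struct inode *inode,
 *					   struct file *file)
 *	{
 *		return svc_pool_stats_open(example_serv, file);
 *	}
 *
 *	static const struct file_operations example_pool_stats_fops = {
 *		.open		= example_pool_stats_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */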
142003cf6c9fSGreg Banks 
142103cf6c9fSGreg Banks /*----------------------------------------------------------------------------*/
1422