xref: /openbmc/linux/net/sunrpc/svc_xprt.c (revision b627b4ed)
1 /*
2  * linux/net/sunrpc/svc_xprt.c
3  *
4  * Author: Tom Tucker <tom@opengridcomputing.com>
5  */
6 
7 #include <linux/sched.h>
8 #include <linux/errno.h>
9 #include <linux/freezer.h>
10 #include <linux/kthread.h>
11 #include <net/sock.h>
12 #include <linux/sunrpc/stats.h>
13 #include <linux/sunrpc/svc_xprt.h>
14 
15 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
16 
17 #define SVC_MAX_WAKING 5
18 
19 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
20 static int svc_deferred_recv(struct svc_rqst *rqstp);
21 static struct cache_deferred_req *svc_defer(struct cache_req *req);
22 static void svc_age_temp_xprts(unsigned long closure);
23 
24 /* apparently the "standard" is that clients close
25  * idle connections after 5 minutes, servers after
26  * 6 minutes
27  *   http://www.connectathon.org/talks96/nfstcp.pdf
28  */
29 static int svc_conn_age_period = 6*60;
30 
31 /* List of registered transport classes */
32 static DEFINE_SPINLOCK(svc_xprt_class_lock);
33 static LIST_HEAD(svc_xprt_class_list);
34 
35 /* SMP locking strategy:
36  *
37  *	svc_pool->sp_lock protects most of the fields of that pool.
38  *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
39  *	when both need to be taken (rare), svc_serv->sv_lock is first.
40  *	The BKL protects svc_serv->sv_nrthreads.
41  *	svc_xprt->xpt_lock protects the svc_xprt->xpt_deferred list
42  *             and the ->xpt_auth_cache.
43  *
44  *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
45  *	enqueued multiply. During normal transport processing this bit
46  *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
47  *	Providers should not manipulate this bit directly.
48  *
49  *	Some flags can be set to certain values at any time
50  *	providing that certain rules are followed:
51  *
52  *	XPT_CONN, XPT_DATA:
53  *		- Can be set or cleared at any time.
54  *		- After a set, svc_xprt_enqueue must be called to enqueue
55  *		  the transport for processing.
56  *		- After a clear, the transport must be read/accepted.
57  *		  If this succeeds, it must be set again.
58  *	XPT_CLOSE:
59  *		- Can be set at any time. It is never cleared.
60  *	XPT_DEAD:
61  *		- Can only be set while XPT_BUSY is held which ensures
62  *		  that no other thread will be using the transport or will
63  *		  try to set XPT_DEAD.
64  */
65 
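/*
 * Editor's illustration (not part of this revision): a minimal sketch of
 * how a provider is expected to drive the flag protocol described above.
 * The callback name "foo_data_ready" is hypothetical; only set_bit() and
 * svc_xprt_enqueue() are this file's API.
 *
 *	static void foo_data_ready(struct svc_xprt *xprt)
 *	{
 *		set_bit(XPT_DATA, &xprt->xpt_flags);
 *		svc_xprt_enqueue(xprt);
 *	}
 *
 * If a later read attempt finds no (or insufficient) data, XPT_DATA is
 * cleared again and must be re-set when data next arrives.
 */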
66 int svc_reg_xprt_class(struct svc_xprt_class *xcl)
67 {
68 	struct svc_xprt_class *cl;
69 	int res = -EEXIST;
70 
71 	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);
72 
73 	INIT_LIST_HEAD(&xcl->xcl_list);
74 	spin_lock(&svc_xprt_class_lock);
75 	/* Make sure there isn't already a class with the same name */
76 	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
77 		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
78 			goto out;
79 	}
80 	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
81 	res = 0;
82 out:
83 	spin_unlock(&svc_xprt_class_lock);
84 	return res;
85 }
86 EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
87 
88 void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
89 {
90 	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
91 	spin_lock(&svc_xprt_class_lock);
92 	list_del_init(&xcl->xcl_list);
93 	spin_unlock(&svc_xprt_class_lock);
94 }
95 EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
96 
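/*
 * Editor's illustration: a transport module would typically pair the two
 * calls above in its init/exit hooks.  The "foo" identifiers are
 * hypothetical.
 *
 *	static int __init svc_foo_init(void)
 *	{
 *		return svc_reg_xprt_class(&svc_foo_class);
 *	}
 *	module_init(svc_foo_init);
 *
 *	static void __exit svc_foo_exit(void)
 *	{
 *		svc_unreg_xprt_class(&svc_foo_class);
 *	}
 *	module_exit(svc_foo_exit);
 */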
97 /*
98  * Format the transport list for printing
99  */
100 int svc_print_xprts(char *buf, int maxlen)
101 {
102 	struct list_head *le;
103 	char tmpstr[80];
104 	int len = 0;
105 	buf[0] = '\0';
106 
107 	spin_lock(&svc_xprt_class_lock);
108 	list_for_each(le, &svc_xprt_class_list) {
109 		int slen;
110 		struct svc_xprt_class *xcl =
111 			list_entry(le, struct svc_xprt_class, xcl_list);
112 
113 		slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
114 				xcl->xcl_name, xcl->xcl_max_payload);
115 		if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
116 			break;
117 		len += slen;
118 		strcat(buf, tmpstr);
119 	}
120 	spin_unlock(&svc_xprt_class_lock);
121 
122 	return len;
123 }
124 
125 static void svc_xprt_free(struct kref *kref)
126 {
127 	struct svc_xprt *xprt =
128 		container_of(kref, struct svc_xprt, xpt_ref);
129 	struct module *owner = xprt->xpt_class->xcl_owner;
130 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
131 	    && xprt->xpt_auth_cache != NULL)
132 		svcauth_unix_info_release(xprt->xpt_auth_cache);
133 	xprt->xpt_ops->xpo_free(xprt);
134 	module_put(owner);
135 }
136 
137 void svc_xprt_put(struct svc_xprt *xprt)
138 {
139 	kref_put(&xprt->xpt_ref, svc_xprt_free);
140 }
141 EXPORT_SYMBOL_GPL(svc_xprt_put);
142 
143 /*
144  * Called by transport drivers to initialize the transport independent
145  * portion of the transport instance.
146  */
147 void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
148 		   struct svc_serv *serv)
149 {
150 	memset(xprt, 0, sizeof(*xprt));
151 	xprt->xpt_class = xcl;
152 	xprt->xpt_ops = xcl->xcl_ops;
153 	kref_init(&xprt->xpt_ref);
154 	xprt->xpt_server = serv;
155 	INIT_LIST_HEAD(&xprt->xpt_list);
156 	INIT_LIST_HEAD(&xprt->xpt_ready);
157 	INIT_LIST_HEAD(&xprt->xpt_deferred);
158 	mutex_init(&xprt->xpt_mutex);
159 	spin_lock_init(&xprt->xpt_lock);
160 	set_bit(XPT_BUSY, &xprt->xpt_flags);
161 }
162 EXPORT_SYMBOL_GPL(svc_xprt_init);
163 
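/*
 * Editor's illustration: a provider's xpo_create method is expected to
 * allocate a structure embedding a svc_xprt, initialize the generic part
 * with svc_xprt_init(), and then do its transport-specific setup.  A
 * sketch, with hypothetical "foo" names:
 *
 *	struct svc_foo {
 *		struct svc_xprt		foo_xprt;
 *		...			(transport-private state)
 *	};
 *
 *	static struct svc_xprt *svc_foo_create(struct svc_serv *serv,
 *					       struct sockaddr *sa,
 *					       int salen, int flags)
 *	{
 *		struct svc_foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (!foo)
 *			return ERR_PTR(-ENOMEM);
 *		svc_xprt_init(&svc_foo_class, &foo->foo_xprt, serv);
 *		...			(bind, listen, etc.)
 *		return &foo->foo_xprt;
 *	}
 *
 * Note that svc_xprt_init() leaves XPT_BUSY set, so the new transport
 * cannot be enqueued until its creator clears the bit (directly, or via
 * svc_xprt_received()).
 */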
164 static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
165 					 struct svc_serv *serv,
166 					 const int family,
167 					 const unsigned short port,
168 					 int flags)
169 {
170 	struct sockaddr_in sin = {
171 		.sin_family		= AF_INET,
172 		.sin_addr.s_addr	= htonl(INADDR_ANY),
173 		.sin_port		= htons(port),
174 	};
175 	struct sockaddr_in6 sin6 = {
176 		.sin6_family		= AF_INET6,
177 		.sin6_addr		= IN6ADDR_ANY_INIT,
178 		.sin6_port		= htons(port),
179 	};
180 	struct sockaddr *sap;
181 	size_t len;
182 
183 	switch (family) {
184 	case PF_INET:
185 		sap = (struct sockaddr *)&sin;
186 		len = sizeof(sin);
187 		break;
188 	case PF_INET6:
189 		sap = (struct sockaddr *)&sin6;
190 		len = sizeof(sin6);
191 		break;
192 	default:
193 		return ERR_PTR(-EAFNOSUPPORT);
194 	}
195 
196 	return xcl->xcl_ops->xpo_create(serv, sap, len, flags);
197 }
198 
199 int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
200 		    const int family, const unsigned short port,
201 		    int flags)
202 {
203 	struct svc_xprt_class *xcl;
204 
205 	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
206 	spin_lock(&svc_xprt_class_lock);
207 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
208 		struct svc_xprt *newxprt;
209 
210 		if (strcmp(xprt_name, xcl->xcl_name))
211 			continue;
212 
213 		if (!try_module_get(xcl->xcl_owner))
214 			goto err;
215 
216 		spin_unlock(&svc_xprt_class_lock);
217 		newxprt = __svc_xpo_create(xcl, serv, family, port, flags);
218 		if (IS_ERR(newxprt)) {
219 			module_put(xcl->xcl_owner);
220 			return PTR_ERR(newxprt);
221 		}
222 
223 		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
224 		spin_lock_bh(&serv->sv_lock);
225 		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
226 		spin_unlock_bh(&serv->sv_lock);
227 		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
228 		return svc_xprt_local_port(newxprt);
229 	}
230  err:
231 	spin_unlock(&svc_xprt_class_lock);
232 	dprintk("svc: transport %s not found\n", xprt_name);
233 	return -ENOENT;
234 }
235 EXPORT_SYMBOL_GPL(svc_create_xprt);
236 
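/*
 * Editor's illustration: a service creates its listeners by class name;
 * for example, an NFS-style service might do (port number hypothetical):
 *
 *	int err = svc_create_xprt(serv, "tcp", PF_INET, 2049,
 *				  SVC_SOCK_DEFAULTS);
 *	if (err < 0)
 *		return err;
 *
 * A negative return means the class was not found or xpo_create failed;
 * a non-negative return is the bound local port number.
 */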
237 /*
238  * Copy the local and remote xprt addresses to the rqstp structure
239  */
240 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
241 {
242 	struct sockaddr *sin;
243 
244 	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
245 	rqstp->rq_addrlen = xprt->xpt_remotelen;
246 
247 	/*
248 	 * Destination address in request is needed for binding the
249 	 * source address in RPC replies/callbacks later.
250 	 */
251 	sin = (struct sockaddr *)&xprt->xpt_local;
252 	switch (sin->sa_family) {
253 	case AF_INET:
254 		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
255 		break;
256 	case AF_INET6:
257 		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
258 		break;
259 	}
260 }
261 EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
262 
263 /**
264  * svc_print_addr - Format rq_addr field for printing
265  * @rqstp: svc_rqst struct containing address to print
266  * @buf: target buffer for formatted address
267  * @len: length of target buffer
268  *
269  */
270 char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
271 {
272 	return __svc_print_addr(svc_addr(rqstp), buf, len);
273 }
274 EXPORT_SYMBOL_GPL(svc_print_addr);
275 
276 /*
277  * Queue up an idle server thread.  Must have pool->sp_lock held.
278  * Note: this is really a stack rather than a queue, so that we only
279  * use as many different threads as we need, and the rest don't pollute
280  * the cache.
281  */
282 static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
283 {
284 	list_add(&rqstp->rq_list, &pool->sp_threads);
285 }
286 
287 /*
288  * Dequeue an nfsd thread.  Must have pool->sp_lock held.
289  */
290 static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
291 {
292 	list_del(&rqstp->rq_list);
293 }
294 
295 /*
296  * Queue up a transport with data pending. If there are idle nfsd
297  * processes, wake 'em up.
298  *
299  */
300 void svc_xprt_enqueue(struct svc_xprt *xprt)
301 {
302 	struct svc_serv	*serv = xprt->xpt_server;
303 	struct svc_pool *pool;
304 	struct svc_rqst	*rqstp;
305 	int cpu;
306 	int thread_avail;
307 
308 	if (!(xprt->xpt_flags &
309 	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
310 		return;
311 
312 	cpu = get_cpu();
313 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
314 	put_cpu();
315 
316 	spin_lock_bh(&pool->sp_lock);
317 
318 	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
319 		/* Don't enqueue dead transports */
320 		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
321 		goto out_unlock;
322 	}
323 
324 	pool->sp_stats.packets++;
325 
326 	/* Mark transport as busy. It will remain in this state until
327 	 * the provider calls svc_xprt_received. We update XPT_BUSY
328 	 * atomically because it also guards against trying to enqueue
329 	 * the transport twice.
330 	 */
331 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
332 		/* Don't enqueue transport while already enqueued */
333 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
334 		goto out_unlock;
335 	}
336 	BUG_ON(xprt->xpt_pool != NULL);
337 	xprt->xpt_pool = pool;
338 
339 	/* Handle pending connection */
340 	if (test_bit(XPT_CONN, &xprt->xpt_flags))
341 		goto process;
342 
343 	/* Handle close in-progress */
344 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
345 		goto process;
346 
347 	/* Check if we have space to reply to a request */
348 	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
349 		/* Don't enqueue while not enough space for reply */
350 		dprintk("svc: no write space, transport %p not enqueued\n",
351 			xprt);
352 		xprt->xpt_pool = NULL;
353 		clear_bit(XPT_BUSY, &xprt->xpt_flags);
354 		goto out_unlock;
355 	}
356 
357  process:
358 	/* Work out whether threads are available */
359 	thread_avail = !list_empty(&pool->sp_threads);	/* threads are asleep */
360 	if (pool->sp_nwaking >= SVC_MAX_WAKING) {
361 		/* too many threads are runnable and trying to wake up */
362 		thread_avail = 0;
363 		pool->sp_stats.overloads_avoided++;
364 	}
365 
366 	if (thread_avail) {
367 		rqstp = list_entry(pool->sp_threads.next,
368 				   struct svc_rqst,
369 				   rq_list);
370 		dprintk("svc: transport %p served by daemon %p\n",
371 			xprt, rqstp);
372 		svc_thread_dequeue(pool, rqstp);
373 		if (rqstp->rq_xprt)
374 			printk(KERN_ERR
375 				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
376 				rqstp, rqstp->rq_xprt);
377 		rqstp->rq_xprt = xprt;
378 		svc_xprt_get(xprt);
379 		rqstp->rq_reserved = serv->sv_max_mesg;
380 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
381 		rqstp->rq_waking = 1;
382 		pool->sp_nwaking++;
383 		pool->sp_stats.threads_woken++;
384 		BUG_ON(xprt->xpt_pool != pool);
385 		wake_up(&rqstp->rq_wait);
386 	} else {
387 		dprintk("svc: transport %p put into queue\n", xprt);
388 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
389 		pool->sp_stats.sockets_queued++;
390 		BUG_ON(xprt->xpt_pool != pool);
391 	}
392 
393 out_unlock:
394 	spin_unlock_bh(&pool->sp_lock);
395 }
396 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
397 
398 /*
399  * Dequeue the first transport.  Must be called with the pool->sp_lock held.
400  */
401 static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
402 {
403 	struct svc_xprt	*xprt;
404 
405 	if (list_empty(&pool->sp_sockets))
406 		return NULL;
407 
408 	xprt = list_entry(pool->sp_sockets.next,
409 			  struct svc_xprt, xpt_ready);
410 	list_del_init(&xprt->xpt_ready);
411 
412 	dprintk("svc: transport %p dequeued, inuse=%d\n",
413 		xprt, atomic_read(&xprt->xpt_ref.refcount));
414 
415 	return xprt;
416 }
417 
418 /*
419  * svc_xprt_received conditionally queues the transport for processing
420  * by another thread. The caller must hold the XPT_BUSY bit and must
421  * not thereafter touch transport data.
422  *
423  * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
424  * insufficient) data.
425  */
426 void svc_xprt_received(struct svc_xprt *xprt)
427 {
428 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
429 	xprt->xpt_pool = NULL;
430 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
431 	svc_xprt_enqueue(xprt);
432 }
433 EXPORT_SYMBOL_GPL(svc_xprt_received);
434 
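/*
 * Editor's note: the usual life cycle is therefore
 *
 *	svc_xprt_enqueue()  -> sets XPT_BUSY, hands the xprt to a thread
 *	xpo_recvfrom()      -> the thread pulls data off the transport
 *	svc_xprt_received() -> clears XPT_BUSY, may requeue the xprt
 *
 * so a transport is processed by at most one thread at a time, and any
 * data that arrived in the meantime is picked up on the requeue.
 */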
435 /**
436  * svc_reserve - change the space reserved for the reply to a request.
437  * @rqstp:  The request in question
438  * @space: new max space to reserve
439  *
440  * Each request reserves some space on the output queue of the transport
441  * to make sure the reply fits.  This function reduces that reserved
442  * space to be the amount of space used already, plus @space.
443  *
444  */
445 void svc_reserve(struct svc_rqst *rqstp, int space)
446 {
447 	space += rqstp->rq_res.head[0].iov_len;
448 
449 	if (space < rqstp->rq_reserved) {
450 		struct svc_xprt *xprt = rqstp->rq_xprt;
451 		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
452 		rqstp->rq_reserved = space;
453 
454 		svc_xprt_enqueue(xprt);
455 	}
456 }
457 EXPORT_SYMBOL_GPL(svc_reserve);
458 
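/*
 * Editor's illustration: a request handler that knows its reply will be
 * small can give back most of its reservation early, freeing write space
 * on the transport (the 512 here is a hypothetical bound):
 *
 *	svc_reserve(rqstp, 512);
 */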
459 static void svc_xprt_release(struct svc_rqst *rqstp)
460 {
461 	struct svc_xprt	*xprt = rqstp->rq_xprt;
462 
463 	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
464 
465 	kfree(rqstp->rq_deferred);
466 	rqstp->rq_deferred = NULL;
467 
468 	svc_free_res_pages(rqstp);
469 	rqstp->rq_res.page_len = 0;
470 	rqstp->rq_res.page_base = 0;
471 
472 	/* Reset response buffer and release
473 	 * the reservation.
474 	 * But first, check that enough space was reserved
475 	 * for the reply, otherwise we have a bug!
476 	 */
477 	if (rqstp->rq_res.len > rqstp->rq_reserved)
478 		printk(KERN_ERR "RPC request reserved %d but used %d\n",
479 		       rqstp->rq_reserved,
480 		       rqstp->rq_res.len);
481 
482 	rqstp->rq_res.head[0].iov_len = 0;
483 	svc_reserve(rqstp, 0);
484 	rqstp->rq_xprt = NULL;
485 
486 	svc_xprt_put(xprt);
487 }
488 
489 /*
490  * External function to wake up a server waiting for data.
491  * This really only makes sense for services like lockd
492  * which have exactly one thread anyway.
493  */
494 void svc_wake_up(struct svc_serv *serv)
495 {
496 	struct svc_rqst	*rqstp;
497 	unsigned int i;
498 	struct svc_pool *pool;
499 
500 	for (i = 0; i < serv->sv_nrpools; i++) {
501 		pool = &serv->sv_pools[i];
502 
503 		spin_lock_bh(&pool->sp_lock);
504 		if (!list_empty(&pool->sp_threads)) {
505 			rqstp = list_entry(pool->sp_threads.next,
506 					   struct svc_rqst,
507 					   rq_list);
508 			dprintk("svc: daemon %p woken up.\n", rqstp);
509 			/*
510 			svc_thread_dequeue(pool, rqstp);
511 			rqstp->rq_xprt = NULL;
512 			 */
513 			wake_up(&rqstp->rq_wait);
514 		}
515 		spin_unlock_bh(&pool->sp_lock);
516 	}
517 }
518 EXPORT_SYMBOL_GPL(svc_wake_up);
519 
520 int svc_port_is_privileged(struct sockaddr *sin)
521 {
522 	switch (sin->sa_family) {
523 	case AF_INET:
524 		return ntohs(((struct sockaddr_in *)sin)->sin_port)
525 			< PROT_SOCK;
526 	case AF_INET6:
527 		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
528 			< PROT_SOCK;
529 	default:
530 		return 0;
531 	}
532 }
533 
534 /*
535  * Make sure that we don't have too many active connections. If we do,
536  * something must be dropped. It's not clear what will happen if we allow
537  * "too many" connections, but when dealing with network-facing software,
538  * we have to code defensively. Here we do that by imposing hard limits.
539  *
540  * There's no point in trying to do random drop here for DoS
541  * prevention. The NFS client does one reconnect every 15 seconds. An
542  * attacker can easily beat that.
543  *
544  * The only somewhat efficient mechanism would be to drop old
545  * connections from the same IP first. But right now we don't even
546  * record the client IP in svc_sock.
547  *
548  * Single-threaded services that expect a lot of clients will probably
549  * need to set sv_maxconn to override the default limit, which is
550  * (sv_nrthreads + 3) * 20 connections (e.g. 220 for an 8-thread server).
551  */
552 static void svc_check_conn_limits(struct svc_serv *serv)
553 {
554 	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
555 				(serv->sv_nrthreads+3) * 20;
556 
557 	if (serv->sv_tmpcnt > limit) {
558 		struct svc_xprt *xprt = NULL;
559 		spin_lock_bh(&serv->sv_lock);
560 		if (!list_empty(&serv->sv_tempsocks)) {
561 			if (net_ratelimit()) {
562 				/* Try to help the admin */
563 				printk(KERN_NOTICE "%s: too many open "
564 				       "connections, consider increasing %s\n",
565 				       serv->sv_name, serv->sv_maxconn ?
566 				       "the max number of connections." :
567 				       "the number of threads.");
568 			}
569 			/*
570 			 * Always select the oldest connection. It's not fair,
571 			 * but so is life
572 			 */
573 			xprt = list_entry(serv->sv_tempsocks.prev,
574 					  struct svc_xprt,
575 					  xpt_list);
576 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
577 			svc_xprt_get(xprt);
578 		}
579 		spin_unlock_bh(&serv->sv_lock);
580 
581 		if (xprt) {
582 			svc_xprt_enqueue(xprt);
583 			svc_xprt_put(xprt);
584 		}
585 	}
586 }
587 
588 /*
589  * Receive the next request on any transport.  This code is carefully
590  * organised not to touch any cachelines in the shared svc_serv
591  * structure, only cachelines in the local svc_pool.
592  */
593 int svc_recv(struct svc_rqst *rqstp, long timeout)
594 {
595 	struct svc_xprt		*xprt = NULL;
596 	struct svc_serv		*serv = rqstp->rq_server;
597 	struct svc_pool		*pool = rqstp->rq_pool;
598 	int			len, i;
599 	int			pages;
600 	struct xdr_buf		*arg;
601 	DECLARE_WAITQUEUE(wait, current);
602 	long			time_left;
603 
604 	dprintk("svc: server %p waiting for data (to = %ld)\n",
605 		rqstp, timeout);
606 
607 	if (rqstp->rq_xprt)
608 		printk(KERN_ERR
609 			"svc_recv: service %p, transport not NULL!\n",
610 			 rqstp);
611 	if (waitqueue_active(&rqstp->rq_wait))
612 		printk(KERN_ERR
613 			"svc_recv: service %p, wait queue active!\n",
614 			 rqstp);
615 
616 	/* now allocate needed pages.  If we get a failure, sleep briefly */
617 	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
618 	for (i = 0; i < pages ; i++)
619 		while (rqstp->rq_pages[i] == NULL) {
620 			struct page *p = alloc_page(GFP_KERNEL);
621 			if (!p) {
622 				set_current_state(TASK_INTERRUPTIBLE);
623 				if (signalled() || kthread_should_stop()) {
624 					set_current_state(TASK_RUNNING);
625 					return -EINTR;
626 				}
627 				schedule_timeout(msecs_to_jiffies(500));
628 			}
629 			rqstp->rq_pages[i] = p;
630 		}
631 	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
632 	BUG_ON(pages >= RPCSVC_MAXPAGES);
633 
634 	/* Make arg->head point to first page and arg->pages point to rest */
635 	arg = &rqstp->rq_arg;
636 	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
637 	arg->head[0].iov_len = PAGE_SIZE;
638 	arg->pages = rqstp->rq_pages + 1;
639 	arg->page_base = 0;
640 	/* save at least one page for response */
641 	arg->page_len = (pages-2)*PAGE_SIZE;
642 	arg->len = (pages-1)*PAGE_SIZE;
643 	arg->tail[0].iov_len = 0;
644 
645 	try_to_freeze();
646 	cond_resched();
647 	if (signalled() || kthread_should_stop())
648 		return -EINTR;
649 
650 	spin_lock_bh(&pool->sp_lock);
651 	if (rqstp->rq_waking) {
652 		rqstp->rq_waking = 0;
653 		pool->sp_nwaking--;
654 		BUG_ON(pool->sp_nwaking < 0);
655 	}
656 	xprt = svc_xprt_dequeue(pool);
657 	if (xprt) {
658 		rqstp->rq_xprt = xprt;
659 		svc_xprt_get(xprt);
660 		rqstp->rq_reserved = serv->sv_max_mesg;
661 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
662 	} else {
663 		/* No data pending. Go to sleep */
664 		svc_thread_enqueue(pool, rqstp);
665 
666 		/*
667 		 * We have to be able to interrupt this wait
668 		 * to bring down the daemons ...
669 		 */
670 		set_current_state(TASK_INTERRUPTIBLE);
671 
672 		/*
673 		 * checking kthread_should_stop() here allows us to avoid
674 		 * locking and signalling when stopping kthreads that call
675 		 * svc_recv. If the thread has already been woken up, then
676 		 * we can exit here without sleeping. If not, then it
677 		 * will be woken up quickly during the schedule_timeout.
678 		 */
679 		if (kthread_should_stop()) {
680 			set_current_state(TASK_RUNNING);
681 			spin_unlock_bh(&pool->sp_lock);
682 			return -EINTR;
683 		}
684 
685 		add_wait_queue(&rqstp->rq_wait, &wait);
686 		spin_unlock_bh(&pool->sp_lock);
687 
688 		time_left = schedule_timeout(timeout);
689 
690 		try_to_freeze();
691 
692 		spin_lock_bh(&pool->sp_lock);
693 		remove_wait_queue(&rqstp->rq_wait, &wait);
694 		if (!time_left)
695 			pool->sp_stats.threads_timedout++;
696 
697 		xprt = rqstp->rq_xprt;
698 		if (!xprt) {
699 			svc_thread_dequeue(pool, rqstp);
700 			spin_unlock_bh(&pool->sp_lock);
701 			dprintk("svc: server %p, no data yet\n", rqstp);
702 			if (signalled() || kthread_should_stop())
703 				return -EINTR;
704 			else
705 				return -EAGAIN;
706 		}
707 	}
708 	spin_unlock_bh(&pool->sp_lock);
709 
710 	len = 0;
711 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
712 		dprintk("svc_recv: found XPT_CLOSE\n");
713 		svc_delete_xprt(xprt);
714 	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
715 		struct svc_xprt *newxpt;
716 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
717 		if (newxpt) {
718 			/*
719 			 * We know this module_get will succeed because the
720 			 * listener holds a reference too
721 			 */
722 			__module_get(newxpt->xpt_class->xcl_owner);
723 			svc_check_conn_limits(xprt->xpt_server);
724 			spin_lock_bh(&serv->sv_lock);
725 			set_bit(XPT_TEMP, &newxpt->xpt_flags);
726 			list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
727 			serv->sv_tmpcnt++;
728 			if (serv->sv_temptimer.function == NULL) {
729 				/* setup timer to age temp transports */
730 				setup_timer(&serv->sv_temptimer,
731 					    svc_age_temp_xprts,
732 					    (unsigned long)serv);
733 				mod_timer(&serv->sv_temptimer,
734 					  jiffies + svc_conn_age_period * HZ);
735 			}
736 			spin_unlock_bh(&serv->sv_lock);
737 			svc_xprt_received(newxpt);
738 		}
739 		svc_xprt_received(xprt);
740 	} else {
741 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
742 			rqstp, pool->sp_id, xprt,
743 			atomic_read(&xprt->xpt_ref.refcount));
744 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
745 		if (rqstp->rq_deferred) {
746 			svc_xprt_received(xprt);
747 			len = svc_deferred_recv(rqstp);
748 		} else
749 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
750 		dprintk("svc: got len=%d\n", len);
751 	}
752 
753 	/* No data, incomplete (TCP) read, or accept() */
754 	if (len == 0 || len == -EAGAIN) {
755 		rqstp->rq_res.len = 0;
756 		svc_xprt_release(rqstp);
757 		return -EAGAIN;
758 	}
759 	clear_bit(XPT_OLD, &xprt->xpt_flags);
760 
761 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
762 	rqstp->rq_chandle.defer = svc_defer;
763 
764 	if (serv->sv_stats)
765 		serv->sv_stats->netcnt++;
766 	return len;
767 }
768 EXPORT_SYMBOL_GPL(svc_recv);
769 
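/*
 * Editor's illustration: svc_recv() is the core of a service thread.  A
 * simplified nfsd/lockd-style loop might look like:
 *
 *	while (!kthread_should_stop()) {
 *		int err = svc_recv(rqstp, 60 * HZ);
 *
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		if (err < 0)
 *			break;
 *		svc_process(rqstp);
 *	}
 *
 * svc_process() dispatches the request and sends the reply through
 * svc_send() below.
 */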
770 /*
771  * Drop request
772  */
773 void svc_drop(struct svc_rqst *rqstp)
774 {
775 	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
776 	svc_xprt_release(rqstp);
777 }
778 EXPORT_SYMBOL_GPL(svc_drop);
779 
780 /*
781  * Return reply to client.
782  */
783 int svc_send(struct svc_rqst *rqstp)
784 {
785 	struct svc_xprt	*xprt;
786 	int		len;
787 	struct xdr_buf	*xb;
788 
789 	xprt = rqstp->rq_xprt;
790 	if (!xprt)
791 		return -EFAULT;
792 
793 	/* release the receive skb before sending the reply */
794 	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
795 
796 	/* calculate over-all length */
797 	xb = &rqstp->rq_res;
798 	xb->len = xb->head[0].iov_len +
799 		xb->page_len +
800 		xb->tail[0].iov_len;
801 
802 	/* Grab mutex to serialize outgoing data. */
803 	mutex_lock(&xprt->xpt_mutex);
804 	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
805 		len = -ENOTCONN;
806 	else
807 		len = xprt->xpt_ops->xpo_sendto(rqstp);
808 	mutex_unlock(&xprt->xpt_mutex);
809 	svc_xprt_release(rqstp);
810 
811 	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
812 		return 0;
813 	return len;
814 }
815 
816 /*
817  * Timer function to close old temporary transports, using
818  * a mark-and-sweep algorithm.
819  */
820 static void svc_age_temp_xprts(unsigned long closure)
821 {
822 	struct svc_serv *serv = (struct svc_serv *)closure;
823 	struct svc_xprt *xprt;
824 	struct list_head *le, *next;
825 	LIST_HEAD(to_be_aged);
826 
827 	dprintk("svc_age_temp_xprts\n");
828 
829 	if (!spin_trylock_bh(&serv->sv_lock)) {
830 		/* busy, try again 1 sec later */
831 		dprintk("svc_age_temp_xprts: busy\n");
832 		mod_timer(&serv->sv_temptimer, jiffies + HZ);
833 		return;
834 	}
835 
836 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
837 		xprt = list_entry(le, struct svc_xprt, xpt_list);
838 
839 		/* First time through, just mark it OLD. Second time
840 		 * through, close it. */
841 		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
842 			continue;
843 		if (atomic_read(&xprt->xpt_ref.refcount) > 1
844 		    || test_bit(XPT_BUSY, &xprt->xpt_flags))
845 			continue;
846 		svc_xprt_get(xprt);
847 		list_move(le, &to_be_aged);
848 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
849 		set_bit(XPT_DETACHED, &xprt->xpt_flags);
850 	}
851 	spin_unlock_bh(&serv->sv_lock);
852 
853 	while (!list_empty(&to_be_aged)) {
854 		le = to_be_aged.next;
855 		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
856 		list_del_init(le);
857 		xprt = list_entry(le, struct svc_xprt, xpt_list);
858 
859 		dprintk("queuing xprt %p for closing\n", xprt);
860 
861 		/* a thread will dequeue and close it soon */
862 		svc_xprt_enqueue(xprt);
863 		svc_xprt_put(xprt);
864 	}
865 
866 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
867 }
868 
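/*
 * Editor's note: with svc_conn_age_period at 6*60 seconds, a temp
 * transport is marked XPT_OLD on one sweep and closed on the next, so
 * only connections idle for roughly 6-12 minutes are dropped; XPT_OLD is
 * cleared in svc_recv() whenever a request arrives.
 */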
869 /*
870  * Remove a dead transport
871  */
872 void svc_delete_xprt(struct svc_xprt *xprt)
873 {
874 	struct svc_serv	*serv = xprt->xpt_server;
875 	struct svc_deferred_req *dr;
876 
877 	/* Only do this once */
878 	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
879 		return;
880 
881 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
882 	xprt->xpt_ops->xpo_detach(xprt);
883 
884 	spin_lock_bh(&serv->sv_lock);
885 	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
886 		list_del_init(&xprt->xpt_list);
887 	/*
888 	 * We used to delete the transport from whichever list
889 	 * its xpt_ready node was on, but we don't actually
890 	 * need to.  This is because the only time we're called
891 	 * while still attached to a queue, the queue itself
892 	 * is about to be destroyed (in svc_destroy).
893 	 */
894 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
895 		serv->sv_tmpcnt--;
896 
897 	for (dr = svc_deferred_dequeue(xprt); dr;
898 	     dr = svc_deferred_dequeue(xprt)) {
899 		svc_xprt_put(xprt);
900 		kfree(dr);
901 	}
902 
903 	svc_xprt_put(xprt);
904 	spin_unlock_bh(&serv->sv_lock);
905 }
906 
907 void svc_close_xprt(struct svc_xprt *xprt)
908 {
909 	set_bit(XPT_CLOSE, &xprt->xpt_flags);
910 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
911 		/* someone else will have to effect the close */
912 		return;
913 
914 	svc_xprt_get(xprt);
915 	svc_delete_xprt(xprt);
916 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
917 	svc_xprt_put(xprt);
918 }
919 EXPORT_SYMBOL_GPL(svc_close_xprt);
920 
921 void svc_close_all(struct list_head *xprt_list)
922 {
923 	struct svc_xprt *xprt;
924 	struct svc_xprt *tmp;
925 
926 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
927 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
928 		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
929 			/* Waiting to be processed, but no threads left,
930 			 * so just remove it from the waiting list.
931 			 */
932 			list_del_init(&xprt->xpt_ready);
933 			clear_bit(XPT_BUSY, &xprt->xpt_flags);
934 		}
935 		svc_close_xprt(xprt);
936 	}
937 }
938 
939 /*
940  * Handle defer and revisit of requests
941  */
942 
943 static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
944 {
945 	struct svc_deferred_req *dr =
946 		container_of(dreq, struct svc_deferred_req, handle);
947 	struct svc_xprt *xprt = dr->xprt;
948 
949 	spin_lock(&xprt->xpt_lock);
950 	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
951 	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
952 		spin_unlock(&xprt->xpt_lock);
953 		dprintk("revisit canceled\n");
954 		svc_xprt_put(xprt);
955 		kfree(dr);
956 		return;
957 	}
958 	dprintk("revisit queued\n");
959 	dr->xprt = NULL;
960 	list_add(&dr->handle.recent, &xprt->xpt_deferred);
961 	spin_unlock(&xprt->xpt_lock);
962 	svc_xprt_enqueue(xprt);
963 	svc_xprt_put(xprt);
964 }
965 
966 /*
967  * Save the request off for later processing. The request buffer looks
968  * like this:
969  *
970  * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
971  *
972  * This code can only handle requests that consist of an xprt-header
973  * and rpc-header.
974  */
975 static struct cache_deferred_req *svc_defer(struct cache_req *req)
976 {
977 	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
978 	struct svc_deferred_req *dr;
979 
980 	if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
981 		return NULL; /* if more than a page, give up FIXME */
982 	if (rqstp->rq_deferred) {
983 		dr = rqstp->rq_deferred;
984 		rqstp->rq_deferred = NULL;
985 	} else {
986 		size_t skip;
987 		size_t size;
988 		/* FIXME maybe discard if size too large */
989 		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
990 		dr = kmalloc(size, GFP_KERNEL);
991 		if (dr == NULL)
992 			return NULL;
993 
994 		dr->handle.owner = rqstp->rq_server;
995 		dr->prot = rqstp->rq_prot;
996 		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
997 		dr->addrlen = rqstp->rq_addrlen;
998 		dr->daddr = rqstp->rq_daddr;
999 		dr->argslen = rqstp->rq_arg.len >> 2;
1000 		dr->xprt_hlen = rqstp->rq_xprt_hlen;
1001 
1002 		/* back up head to the start of the buffer and copy */
1003 		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1004 		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
1005 		       dr->argslen << 2);
1006 	}
1007 	svc_xprt_get(rqstp->rq_xprt);
1008 	dr->xprt = rqstp->rq_xprt;
1009 
1010 	dr->handle.revisit = svc_revisit;
1011 	return &dr->handle;
1012 }
1013 
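/*
 * Editor's illustration: svc_defer() is not called directly; it is
 * published through rqstp->rq_chandle.defer (set up in svc_recv) so that
 * the sunrpc cache code can park a request while an upcall is pending,
 * roughly:
 *
 *	dreq = req->defer(req);		(i.e. svc_defer)
 *	if (dreq == NULL)
 *		...			(too big to save; request dropped)
 *
 * When the cache entry is filled in, dreq->revisit() (svc_revisit above)
 * puts the saved request back on the transport's xpt_deferred list and
 * re-enqueues the transport.
 */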
1014 /*
1015  * recv data from a deferred request into an active one
1016  */
1017 static int svc_deferred_recv(struct svc_rqst *rqstp)
1018 {
1019 	struct svc_deferred_req *dr = rqstp->rq_deferred;
1020 
1021 	/* setup iov_base past transport header */
1022 	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
1023 	/* The iov_len does not include the transport header bytes */
1024 	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
1025 	rqstp->rq_arg.page_len = 0;
1026 	/* The rq_arg.len includes the transport header bytes */
1027 	rqstp->rq_arg.len     = dr->argslen<<2;
1028 	rqstp->rq_prot        = dr->prot;
1029 	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
1030 	rqstp->rq_addrlen     = dr->addrlen;
1031 	/* Save off transport header len in case we get deferred again */
1032 	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
1033 	rqstp->rq_daddr       = dr->daddr;
1034 	rqstp->rq_respages    = rqstp->rq_pages;
1035 	return (dr->argslen<<2) - dr->xprt_hlen;
1036 }
1037 
1038 
1039 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1040 {
1041 	struct svc_deferred_req *dr = NULL;
1042 
1043 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
1044 		return NULL;
1045 	spin_lock(&xprt->xpt_lock);
1046 	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
1047 	if (!list_empty(&xprt->xpt_deferred)) {
1048 		dr = list_entry(xprt->xpt_deferred.next,
1049 				struct svc_deferred_req,
1050 				handle.recent);
1051 		list_del_init(&dr->handle.recent);
1052 		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
1053 	}
1054 	spin_unlock(&xprt->xpt_lock);
1055 	return dr;
1056 }
1057 
1058 /**
1059  * svc_find_xprt - find an RPC transport instance
1060  * @serv: pointer to svc_serv to search
1061  * @xcl_name: C string containing transport's class name
1062  * @af: Address family of transport's local address
1063  * @port: transport's IP port number
1064  *
1065  * Return the transport instance pointer for the endpoint accepting
1066  * connections/peer traffic from the specified transport class,
1067  * address family and port.
1068  *
1069  * Specifying 0 for the address family or port is effectively a
1070  * wild-card, and will result in matching the first transport in the
1071  * service's list that has a matching class name.
1072  */
1073 struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
1074 			       const sa_family_t af, const unsigned short port)
1075 {
1076 	struct svc_xprt *xprt;
1077 	struct svc_xprt *found = NULL;
1078 
1079 	/* Sanity check the args */
1080 	if (serv == NULL || xcl_name == NULL)
1081 		return found;
1082 
1083 	spin_lock_bh(&serv->sv_lock);
1084 	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1085 		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
1086 			continue;
1087 		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
1088 			continue;
1089 		if (port != 0 && port != svc_xprt_local_port(xprt))
1090 			continue;
1091 		found = xprt;
1092 		svc_xprt_get(xprt);
1093 		break;
1094 	}
1095 	spin_unlock_bh(&serv->sv_lock);
1096 	return found;
1097 }
1098 EXPORT_SYMBOL_GPL(svc_find_xprt);
1099 
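/*
 * Editor's illustration: zero af/port act as wild-cards, so e.g. the
 * service's TCP listener can be located with:
 *
 *	struct svc_xprt *xprt = svc_find_xprt(serv, "tcp", 0, 0);
 *
 *	if (xprt) {
 *		...
 *		svc_xprt_put(xprt);
 *	}
 *
 * Note the returned transport carries a reference that the caller must
 * drop with svc_xprt_put().
 */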
1100 /*
1101  * Format a buffer with a list of the active transports. A zero for
1102  * the buflen parameter disables target buffer overflow checking.
1103  */
1104 int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
1105 {
1106 	struct svc_xprt *xprt;
1107 	char xprt_str[64];
1108 	int totlen = 0;
1109 	int len;
1110 
1111 	/* Sanity check args */
1112 	if (!serv)
1113 		return 0;
1114 
1115 	spin_lock_bh(&serv->sv_lock);
1116 	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1117 		len = snprintf(xprt_str, sizeof(xprt_str),
1118 			       "%s %d\n", xprt->xpt_class->xcl_name,
1119 			       svc_xprt_local_port(xprt));
1120 		/* If the string was truncated, replace with error string */
1121 		if (len >= sizeof(xprt_str))
1122 			strcpy(xprt_str, "name-too-long\n");
1123 		/* Don't overflow buffer */
1124 		len = strlen(xprt_str);
1125 		if (buflen && (len + totlen >= buflen))
1126 			break;
1127 		strcpy(buf+totlen, xprt_str);
1128 		totlen += len;
1129 	}
1130 	spin_unlock_bh(&serv->sv_lock);
1131 	return totlen;
1132 }
1133 EXPORT_SYMBOL_GPL(svc_xprt_names);
1134 
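/*
 * Editor's note: this helper backs administrative "list the ports"
 * interfaces (nfsd's portlist file, for example); each transport is
 * reported as a "name port" pair on its own line.
 */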
1135 
1136 /*----------------------------------------------------------------------------*/
1137 
1138 static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
1139 {
1140 	unsigned int pidx = (unsigned int)*pos;
1141 	struct svc_serv *serv = m->private;
1142 
1143 	dprintk("svc_pool_stats_start, *pos=%u\n", pidx);
1144 
1145 	lock_kernel();
1146 	/* bump up the pseudo refcount while traversing */
1147 	svc_get(serv);
1148 	unlock_kernel();
1149 
1150 	if (!pidx)
1151 		return SEQ_START_TOKEN;
1152 	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
1153 }
1154 
1155 static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
1156 {
1157 	struct svc_pool *pool = p;
1158 	struct svc_serv *serv = m->private;
1159 
1160 	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
1161 
1162 	if (p == SEQ_START_TOKEN) {
1163 		pool = &serv->sv_pools[0];
1164 	} else {
1165 		unsigned int pidx = (pool - &serv->sv_pools[0]);
1166 		if (pidx < serv->sv_nrpools-1)
1167 			pool = &serv->sv_pools[pidx+1];
1168 		else
1169 			pool = NULL;
1170 	}
1171 	++*pos;
1172 	return pool;
1173 }
1174 
1175 static void svc_pool_stats_stop(struct seq_file *m, void *p)
1176 {
1177 	struct svc_serv *serv = m->private;
1178 
1179 	lock_kernel();
1180 	/* this function really, really should have been called svc_put() */
1181 	svc_destroy(serv);
1182 	unlock_kernel();
1183 }
1184 
1185 static int svc_pool_stats_show(struct seq_file *m, void *p)
1186 {
1187 	struct svc_pool *pool = p;
1188 
1189 	if (p == SEQ_START_TOKEN) {
1190 		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
1191 		return 0;
1192 	}
1193 
1194 	seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
1195 		pool->sp_id,
1196 		pool->sp_stats.packets,
1197 		pool->sp_stats.sockets_queued,
1198 		pool->sp_stats.threads_woken,
1199 		pool->sp_stats.overloads_avoided,
1200 		pool->sp_stats.threads_timedout);
1201 
1202 	return 0;
1203 }
1204 
1205 static const struct seq_operations svc_pool_stats_seq_ops = {
1206 	.start	= svc_pool_stats_start,
1207 	.next	= svc_pool_stats_next,
1208 	.stop	= svc_pool_stats_stop,
1209 	.show	= svc_pool_stats_show,
1210 };
1211 
1212 int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
1213 {
1214 	int err;
1215 
1216 	err = seq_open(file, &svc_pool_stats_seq_ops);
1217 	if (!err)
1218 		((struct seq_file *) file->private_data)->private = serv;
1219 	return err;
1220 }
1221 EXPORT_SYMBOL(svc_pool_stats_open);
1222 
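/*
 * Editor's illustration: a service exports these statistics by wiring
 * the open routine above into a seq_file-based file_operations; the
 * "foo" names are hypothetical:
 *
 *	static int foo_pool_stats_open(struct inode *inode,
 *				       struct file *file)
 *	{
 *		return svc_pool_stats_open(foo_serv, file);
 *	}
 *
 *	static const struct file_operations foo_pool_stats_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_pool_stats_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= seq_release,
 *	};
 */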
1223 /*----------------------------------------------------------------------------*/
1224