/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);

/* Apparently the "standard" is that clients close
 * idle connections after 5 minutes and servers after
 * 6 minutes; see
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *             and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport from being
 *	enqueued multiple times. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	provided that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */
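
/*
 * Illustrative provider-side use of the rules above (a sketch, not
 * code taken from this file): when data arrives on a connection, a
 * transport provider typically does
 *
 *	set_bit(XPT_DATA, &xprt->xpt_flags);
 *	svc_xprt_enqueue(xprt);
 *
 * and XPT_DATA is cleared again only when a later read attempt finds
 * no (or insufficient) data.
 */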

int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
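
/*
 * A minimal, hypothetical registration from a transport module's init
 * path (the "foo" names are invented for illustration; the fields are
 * the ones this file reads):
 *
 *	static struct svc_xprt_class foo_xprt_class = {
 *		.xcl_name	 = "foo",
 *		.xcl_owner	 = THIS_MODULE,
 *		.xcl_ops	 = &foo_xprt_ops,
 *		.xcl_max_payload = 4096,
 *	};
 *
 *	svc_reg_xprt_class(&foo_xprt_class);
 */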

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct list_head *le;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each(le, &svc_xprt_class_list) {
		int slen;
		struct svc_xprt_class *xcl =
			list_entry(le, struct svc_xprt_class, xcl_list);

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

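/*
 * Final release of a transport: called via kref_put when the last
 * reference is dropped. Frees the cached authunix info (if any), lets
 * the provider free its private state, then drops the module ref.
 */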
static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
	    && xprt->xpt_auth_cache != NULL)
		svcauth_unix_info_release(xprt->xpt_auth_cache);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport-independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
		   struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

int svc_create_xprt(struct svc_serv *serv, char *xprt_name, unsigned short port,
		    int flags)
{
	struct svc_xprt_class *xcl;
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = xcl->xcl_ops->
			xpo_create(serv, (struct sockaddr *)&sin, sizeof(sin),
				   flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}

		clear_bit(XPT_TEMP, &newxprt->xpt_flags);
		spin_lock_bh(&serv->sv_lock);
		list_add(&newxprt->xpt_list, &serv->sv_permsocks);
		spin_unlock_bh(&serv->sv_lock);
		clear_bit(XPT_BUSY, &newxprt->xpt_flags);
		return svc_xprt_local_port(newxprt);
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
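
/*
 * Typical call (illustrative values, not taken from this file): an
 * RPC service can create its TCP listener on port 2049 with
 *
 *	err = svc_create_xprt(serv, "tcp", 2049, SVC_SOCK_DEFAULTS);
 *
 * On success the bound local port is returned; a negative value is
 * an errno.
 */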

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct sockaddr *sin;

	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	sin = (struct sockaddr *)&xprt->xpt_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(xprt->xpt_flags &
	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
		       "svc_xprt_enqueue: "
		       "threads and transports both waiting??\n");

	if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		/* Don't enqueue dead transports */
		dprintk("svc: transport %p is dead, not enqueued\n", xprt);
		goto out_unlock;
	}

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out_unlock;
	}
	BUG_ON(xprt->xpt_pool != NULL);
	xprt->xpt_pool = pool;

	/* Handle pending connection */
	if (test_bit(XPT_CONN, &xprt->xpt_flags))
		goto process;

	/* Handle close in-progress */
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
		goto process;

	/* Check if we have space to reply to a request */
	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
		/* Don't enqueue while there's not enough space for a reply */
		dprintk("svc: no write space, transport %p not enqueued\n",
			xprt);
		xprt->xpt_pool = NULL;
		clear_bit(XPT_BUSY, &xprt->xpt_flags);
		goto out_unlock;
	}

 process:
	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: transport %p served by daemon %p\n",
			xprt, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_xprt)
			printk(KERN_ERR
				"svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
				rqstp, rqstp->rq_xprt);
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
		BUG_ON(xprt->xpt_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: transport %p put into queue\n", xprt);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		BUG_ON(xprt->xpt_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport.  Must be called with the pool->sp_lock held.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	xprt = list_entry(pool->sp_sockets.next,
			  struct svc_xprt, xpt_ready);
	list_del_init(&xprt->xpt_ready);

	dprintk("svc: transport %p dequeued, inuse=%d\n",
		xprt, atomic_read(&xprt->xpt_ref.refcount));

	return xprt;
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
void svc_xprt_received(struct svc_xprt *xprt)
{
	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
	xprt->xpt_pool = NULL;
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_received);

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL(svc_reserve);
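
/*
 * For example (illustrative values): a service that knows the rest of
 * its reply needs at most another 512 bytes can call
 *
 *	svc_reserve(rqstp, 512);
 *
 * to shrink its reservation to "bytes already used + 512", releasing
 * write space that svc_xprt_enqueue can hand to other requests.
 */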

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if (rqstp->rq_res.len > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * External function to wake up a server waiting for data.
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_xprt = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
EXPORT_SYMBOL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we
 * have, something must be dropped.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 */
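/*
 * Concretely: with the (sv_nrthreads + 3) * 20 threshold used below,
 * a server running 8 threads starts culling only once more than 220
 * temporary connections are open.
 */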
static void svc_check_conn_limits(struct svc_serv *serv)
{
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open "
				       "connections, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
			}
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt		*xprt = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			 rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			 rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled() || kthread_should_stop())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Checking kthread_should_stop() here allows us to avoid
		 * locking and signalling when stopping kthreads that call
		 * svc_recv. If the thread has already been woken up, then
		 * we can exit here without sleeping. If not, then it
		 * will be woken up quickly during the schedule_timeout.
		 */
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			spin_unlock_bh(&pool->sp_lock);
			return -EINTR;
		}

		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		xprt = rqstp->rq_xprt;
		if (!xprt) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			if (signalled() || kthread_should_stop())
				return -EINTR;
			else
				return -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	len = 0;
	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt) {
			/*
			 * We know this module_get will succeed because the
			 * listener holds a reference too
			 */
			__module_get(newxpt->xpt_class->xcl_owner);
			svc_check_conn_limits(xprt->xpt_server);
			spin_lock_bh(&serv->sv_lock);
			set_bit(XPT_TEMP, &newxpt->xpt_flags);
			list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
			serv->sv_tmpcnt++;
			if (serv->sv_temptimer.function == NULL) {
				/* setup timer to age temp transports */
				setup_timer(&serv->sv_temptimer,
					    svc_age_temp_xprts,
					    (unsigned long)serv);
				mod_timer(&serv->sv_temptimer,
					  jiffies + svc_conn_age_period * HZ);
			}
			spin_unlock_bh(&serv->sv_lock);
			svc_xprt_received(newxpt);
		}
		svc_xprt_received(xprt);
	} else {
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred) {
			svc_xprt_received(xprt);
			len = svc_deferred_recv(rqstp);
		} else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
	}

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_xprt_release(rqstp);
		return -EAGAIN;
	}
	clear_bit(XPT_OLD, &xprt->xpt_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
EXPORT_SYMBOL(svc_recv);
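
/*
 * Shape of a typical service thread loop built on svc_recv (a sketch;
 * real daemons such as nfsd and lockd add signal handling and
 * per-service setup around this):
 *
 *	for (;;) {
 *		err = svc_recv(rqstp, 60 * 60 * HZ);
 *		if (err == -EAGAIN)
 *			continue;
 *		if (err == -EINTR)
 *			break;
 *		svc_process(rqstp);
 *	}
 */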

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		return -EFAULT;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
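/*
 * A rough consequence (not a hard guarantee): an idle temporary
 * transport is marked XPT_OLD on one sweep and closed on the next,
 * so it lives for between one and two svc_conn_age_period intervals,
 * i.e. roughly 6 to 12 minutes with the default above.
 */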
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1
		    || test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		svc_xprt_get(xprt);
		list_move(le, &to_be_aged);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_DETACHED, &xprt->xpt_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Remove a dead transport
 */
void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
		list_del_init(&xprt->xpt_list);
	/*
	 * We used to delete the transport from whichever list
	 * its sk_xprt.xpt_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
		BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
		if (test_bit(XPT_TEMP, &xprt->xpt_flags))
			serv->sv_tmpcnt--;
		svc_xprt_put(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;

	svc_xprt_get(xprt);
	svc_delete_xprt(xprt);
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

void svc_close_all(struct list_head *xprt_list)
{
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;

	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
			/* Waiting to be processed, but no threads left,
			 * so just remove it from the waiting list.
			 */
			list_del_init(&xprt->xpt_ready);
			clear_bit(XPT_BUSY, &xprt->xpt_flags);
		}
		svc_close_xprt(xprt);
	}
}

/*
 * Handle defer and revisit of requests
 */

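/*
 * Revisit callback for a deferred request: either re-queue the request
 * on its transport for another pass through svc_recv, or, if the cache
 * says "too_many", drop it and release the transport reference.
 */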
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	if (too_many) {
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	spin_lock(&xprt->xpt_lock);
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
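/*
 * Note: argslen below is kept in 32-bit XDR words (hence the >> 2 and
 * << 2 shifts), and only the transport header plus the head iovec are
 * copied; requests with page data are never deferred.
 */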
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len     = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen   = dr->xprt_hlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}

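/*
 * Pull the next deferred request, if any, off the transport.
 * XPT_DEFERRED is cleared first and set again only while entries
 * remain, so the flag mirrors "list non-empty" for the lockless
 * test_bit() check below.
 */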
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	}
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/*
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, char *xcl_name,
			       int af, int port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (!serv || !xcl_name)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
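
/*
 * Illustrative lookup (values invented for the example): find the
 * server's IPv4 TCP endpoint regardless of port with
 *
 *	xprt = svc_find_xprt(serv, "tcp", AF_INET, 0);
 *
 * A non-NULL result carries an extra reference that the caller must
 * drop with svc_xprt_put().
 */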

/*
 * Format a buffer with a list of the active transports. A zero for
 * the buflen parameter disables target buffer overflow checking.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
{
	struct svc_xprt *xprt;
	char xprt_str[64];
	int totlen = 0;
	int len;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = snprintf(xprt_str, sizeof(xprt_str),
			       "%s %d\n", xprt->xpt_class->xcl_name,
			       svc_xprt_local_port(xprt));
		/* If the string was truncated, replace with error string */
		if (len >= sizeof(xprt_str))
			strcpy(xprt_str, "name-too-long\n");
		/* Don't overflow buffer */
		len = strlen(xprt_str);
		if (buflen && (len + totlen >= buflen))
			break;
		strcpy(buf+totlen, xprt_str);
		totlen += len;
	}
	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);
1055