/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Set up once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
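
/*
 * With perm 0644 above, the mapping mode is tunable at runtime via
 * /sys/module/sunrpc/parameters/pool_mode, e.g. (illustrative shell
 * usage, not part of this file):
 *
 *	echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * param_set_pool_mode() above makes such a write fail with -EBUSY
 * while any pooled service still holds a reference to the map.
 */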

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx >= maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}


/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}


/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];
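	/* Despite its name, "node" holds a cpu id when the map is in
	 * SVC_POOL_PERCPU mode and a NUMA node id in SVC_POOL_PERNODE
	 * mode; pool_to[] stores whichever the mode uses. */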

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		cpumask_of_cpu_ptr(cpumask, node);
		set_cpus_allowed_ptr(task, cpumask);
		break;
	}
	case SVC_POOL_PERNODE:
	{
		node_to_cpumask_ptr(nodecpumask, node);
		set_cpus_allowed_ptr(task, nodecpumask);
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
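	/* The modulus below is defensive: a pidx taken from the map
	 * should already be < sv_nrpools, so this only guards against
	 * a stale or inconsistent mapping. */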
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}


/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_register(serv, 0, 0);

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_module = mod;
	}

	return serv;
}
EXPORT_SYMBOL(svc_create_pooled);
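
/*
 * Illustrative caller (not part of this file; modelled on nfsd in
 * fs/nfsd/nfssvc.c, so treat the exact arguments as an assumption):
 *
 *	serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
 *				 nfsd_last_thread, nfsd, THIS_MODULE);
 *
 * Passing the thread function and owning module here is what makes
 * svc_serv_is_pooled() true and lets svc_set_num_threads() spawn
 * kthreads on the caller's behalf.
 */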

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	/* Unregister service with the portmapper */
	svc_register(serv, 0, 0);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	unsigned int pages, arghi;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply; we assume that one of
				       * them is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
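
/*
 * Worked example (assuming 4 KiB pages): for sv_max_mesg == 32 KiB,
 * svc_init_buffer() allocates 32768 / 4096 + 1 = 9 pages, and returns
 * nonzero only if every page was allocated.
 */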

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads - 1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads - 1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL(svc_set_num_threads);
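
/*
 * Illustrative caller (an assumption modelled on nfsd_svc(); the real
 * caller lives in fs/nfsd/nfssvc.c):
 *
 *	error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
 *
 * With a NULL pool the new total is spread across all pools in
 * round-robin order; surplus threads are asked to exit via SIGINT.
 */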

/*
 * Called from a server thread as it's exiting. Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL(svc_exit_thread);

/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
	struct svc_program	*progp;
	unsigned long		flags;
	unsigned int		i;
	int			error = 0, dummy;

	if (!port)
		clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("svc: svc_register(%s, %s, %d, %d)%s\n",
					progp->pg_name,
					proto == IPPROTO_UDP ? "udp" : "tcp",
					port,
					i,
					progp->pg_vers[i]->vs_hidden ?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = rpcb_register(progp->pg_prog, i, proto, port, &dummy);
			if (error < 0)
				break;
			if (port && !dummy) {
				error = -EACCES;
				break;
			}
		}
	}

	if (!port) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return error;
}
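
/*
 * For example, svc_destroy() above unregisters every non-hidden
 * program version by calling svc_register(serv, 0, 0); the
 * TIF_SIGPENDING dance lets the rpcbind calls proceed even though
 * the exiting thread may already have a signal pending.
 */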

/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	va_list args;
	int	r;
	char	buf[RPC_MAX_ADDRBUFLEN];

	if (!net_ratelimit())
		return 0;

	printk(KERN_WARNING "svc: %s: ",
		svc_print_addr(rqstp, buf, sizeof(buf)));

	va_start(args, fmt);
	r = vprintk(fmt, args);
	va_end(args);

	return r;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec *		argv = &rqstp->rq_arg.head[0];
	struct kvec *		resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;

	rpc_stat = rpc_success;

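	/* The smallest well-formed call message carries six 32-bit
	 * words before any credentials: XID, message direction, RPC
	 * version, program, version and procedure; hence the 6*4
	 * check below (see the svc_getnl() calls that follow). */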
	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_splice_ok = 1;

	/* Setup reply header */
	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);

	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	    !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* Bump per-procedure stats counter */
	procp->pc_count++;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_drop_reply) {
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
		if (*statp == rpc_success && (xdr = procp->pc_encode) &&
		    !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void *)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %zd, dropping request\n",
			argv->iov_len);

	goto dropit;			/* drop request */

err_bad_dir:
	svc_printk(rqstp, "bad direction %d, dropping request\n", dir);

	serv->sv_stats->rpcbadfmt++;
	goto dropit;			/* drop request */

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		       vers, prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, progp->pg_lovers);
	svc_putnl(resv, progp->pg_hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}
EXPORT_SYMBOL(svc_process);

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
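
/*
 * In other words, the effective payload limit is
 * min(transport limit, sv_max_payload); e.g. a server created with
 * bufsize 0 caps every transport at the 4096-byte default set in
 * __svc_create().
 */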
1058