xref: /openbmc/linux/fs/nfsd/nfssvc.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
/*
 * linux/fs/nfsd/nfssvc.c
 *
 * Central processing for nfsd.
 *
 * Authors:	Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/nfs.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/fs_struct.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/cache.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/stats.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>

#define NFSDDBG_FACILITY	NFSDDBG_SVC

/* these signals will be delivered to an nfsd thread
 * when handling a request
 */
#define ALLOWED_SIGS	(sigmask(SIGKILL))
/* these signals will be delivered to an nfsd thread
 * when not handling a request, i.e. when waiting
 */
#define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
/* if the last thread dies with SIGHUP, then the exports table is
 * left unchanged (like 2.4-{0-9}).  Any other signal will clear
 * the exports table (like 2.2).
 */
#define	SIG_NOCLEAN	SIGHUP

extern struct svc_program	nfsd_program;
static void			nfsd(struct svc_rqst *rqstp);
struct timeval			nfssvc_boot;
       struct svc_serv 		*nfsd_serv;
static atomic_t			nfsd_busy;
static unsigned long		nfsd_last_call;
static DEFINE_SPINLOCK(nfsd_call_lock);

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static struct svc_version *	nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS            2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)
static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_versions,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static struct svc_version *	nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS    	2
#define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)
static struct svc_version *nfsd_versions[NFSD_NRVERS];

struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_versions,	/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* server statistics */
	.pg_authenticate	= &svc_set_client,	/* export authentication */

};

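/*
 * Enable (NFSD_SET), disable (NFSD_CLEAR), test (NFSD_TEST) or check
 * compiled-in availability (NFSD_AVAIL) of a single NFS protocol version;
 * the v2/v3 ACL version table is kept in step when it exists.
 */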
int nfsd_vers(int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return -1;
	switch(change) {
	case NFSD_SET:
		nfsd_versions[vers] = nfsd_version[vers];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = nfsd_acl_version[vers];
#endif
		break;
	case NFSD_CLEAR:
		nfsd_versions[vers] = NULL;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = NULL;
#endif
		break;
	case NFSD_TEST:
		return nfsd_versions[vers] != NULL;
	case NFSD_AVAIL:
		return nfsd_version[vers] != NULL;
	}
	return 0;
}
/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192

int nfsd_nrthreads(void)
{
	if (nfsd_serv == NULL)
		return 0;
	else
		return nfsd_serv->sv_nrthreads;
}

static int killsig;	/* signal that was used to kill last nfsd */
static void nfsd_last_thread(struct svc_serv *serv)
{
	/* When last nfsd thread exits we need to do some clean-up */
	struct svc_sock *svsk;
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list)
		lockd_down();
	nfsd_serv = NULL;
	nfsd_racache_shutdown();
	nfs4_state_shutdown();

	printk(KERN_WARNING "nfsd: last server has exited\n");
	if (killsig != SIG_NOCLEAN) {
		printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
		nfsd_export_flush();
	}
}

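/*
 * If every NFS version has been cleared, restore the full default set of
 * versions (and ACL versions) so the server never starts with none enabled.
 */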
void nfsd_reset_versions(void)
{
	int found_one = 0;
	int i;

	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
		if (nfsd_program.pg_vers[i])
			found_one = 1;
	}

	if (!found_one) {
		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
			nfsd_program.pg_vers[i] = nfsd_version[i];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
			nfsd_acl_program.pg_vers[i] =
				nfsd_acl_version[i];
#endif
	}
}

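/*
 * Create the global nfsd_serv, or just take another reference if it already
 * exists.  When no maximum block size has been set, pick a default of about
 * 1/4096 of total memory, bottoming out at 8K.
 */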
int nfsd_create_serv(void)
{
	int err = 0;
	lock_kernel();
	if (nfsd_serv) {
		svc_get(nfsd_serv);
		unlock_kernel();
		return 0;
	}
	if (nfsd_max_blksize == 0) {
		/* choose a suitable default */
		struct sysinfo i;
		si_meminfo(&i);
		/* Aim for 1/4096 of memory per thread
		 * This gives 1MB on 4Gig machines
		 * But only uses 32K on 128M machines.
		 * Bottom out at 8K on 32M and smaller.
		 * Of course, this is only a default.
		 */
		nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
		i.totalram <<= PAGE_SHIFT - 12;
		while (nfsd_max_blksize > i.totalram &&
		       nfsd_max_blksize >= 8*1024*2)
			nfsd_max_blksize /= 2;
	}

	atomic_set(&nfsd_busy, 0);
	nfsd_serv = svc_create_pooled(&nfsd_program,
				      nfsd_max_blksize,
				      nfsd_last_thread,
				      nfsd, SIG_NOCLEAN, THIS_MODULE);
	if (nfsd_serv == NULL)
		err = -ENOMEM;
	unlock_kernel();
	do_gettimeofday(&nfssvc_boot);		/* record boot time */
	return err;
}

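/*
 * Create the default listening sockets (UDP, plus TCP when CONFIG_NFSD_TCP
 * is set) on the given port, bringing lockd up once per transport.
 */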
static int nfsd_init_socks(int port)
{
	int error;
	if (!list_empty(&nfsd_serv->sv_permsocks))
		return 0;

	error = lockd_up(IPPROTO_UDP);
	if (error >= 0) {
		error = svc_makesock(nfsd_serv, IPPROTO_UDP, port,
					SVC_SOCK_DEFAULTS);
		if (error < 0)
			lockd_down();
	}
	if (error < 0)
		return error;

#ifdef CONFIG_NFSD_TCP
	error = lockd_up(IPPROTO_TCP);
	if (error >= 0) {
		error = svc_makesock(nfsd_serv, IPPROTO_TCP, port,
					SVC_SOCK_DEFAULTS);
		if (error < 0)
			lockd_down();
	}
	if (error < 0)
		return error;
#endif
	return 0;
}

int nfsd_nrpools(void)
{
	if (nfsd_serv == NULL)
		return 0;
	else
		return nfsd_serv->sv_nrpools;
}

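/* Report the current thread count of each pool into the caller's array
 * (at most n entries). */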
int nfsd_get_nrthreads(int n, int *nthreads)
{
	int i = 0;

	if (nfsd_serv != NULL) {
		for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}

int nfsd_set_nrthreads(int n, int *nthreads)
{
	int i = 0;
	int tot = 0;
	int err = 0;

	if (nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nfsd_serv->sv_nrpools)
		n = nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		if (nthreads[i] > NFSD_MAXSERVS)
			nthreads[i] = NFSD_MAXSERVS;
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;

	/* apply the new numbers */
	lock_kernel();
	svc_get(nfsd_serv);
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			break;
	}
	svc_destroy(nfsd_serv);
	unlock_kernel();

	return err;
}

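/*
 * Start or adjust the NFS server: initialise the readahead cache and NFSv4
 * state, make sure the listening sockets exist, then set the total number
 * of server threads.
 */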
int
nfsd_svc(unsigned short port, int nrservs)
{
	int	error;

	lock_kernel();
	dprintk("nfsd: creating service\n");
	error = -EINVAL;
	if (nrservs <= 0)
		nrservs = 0;
	if (nrservs > NFSD_MAXSERVS)
		nrservs = NFSD_MAXSERVS;

	/* Readahead param cache - will no-op if it already exists */
	error = nfsd_racache_init(2*nrservs);
	if (error<0)
		goto out;
	error = nfs4_state_start();
	if (error<0)
		goto out;

	nfsd_reset_versions();

	error = nfsd_create_serv();

	if (error)
		goto out;
	error = nfsd_init_socks(port);
	if (error)
		goto failure;

	error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
 failure:
	svc_destroy(nfsd_serv);		/* Release server */
 out:
	unlock_kernel();
	return error;
}

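/*
 * Charge the time since the previous call to the decile corresponding to
 * the fraction of nfsd threads that were busy, updating the thread-usage
 * statistics in nfsdstats.
 */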
static inline void
update_thread_usage(int busy_threads)
{
	unsigned long prev_call;
	unsigned long diff;
	int decile;

	spin_lock(&nfsd_call_lock);
	prev_call = nfsd_last_call;
	nfsd_last_call = jiffies;
	decile = busy_threads*10/nfsdstats.th_cnt;
	if (decile>0 && decile <= 10) {
		diff = nfsd_last_call - prev_call;
		if ( (nfsdstats.th_usage[decile-1] += diff) >= NFSD_USAGE_WRAP)
			nfsdstats.th_usage[decile-1] -= NFSD_USAGE_WRAP;
		if (decile == 10)
			nfsdstats.th_fullcnt++;
	}
	spin_unlock(&nfsd_call_lock);
}

/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct fs_struct *fsp;
	int		err;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	rqstp->rq_task = current;

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked.  */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}

	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		killsig = signo;
	}
	/* Clear signals before calling svc_exit_thread() */
	flush_signals(current);

	lock_kernel();

	nfsdstats.th_cnt--;

out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}

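/*
 * Common RPC dispatch for all NFS versions: consult the duplicate-reply
 * cache, decode the arguments, run the procedure, encode the status and
 * result, and finally cache the reply.
 */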
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	struct svc_procedure	*proc;
	kxdrproc_t		xdr;
	__be32			nfserr;
	__be32			*nfserrp;

	dprintk("nfsd_dispatch: vers %d proc %d\n",
				rqstp->rq_vers, rqstp->rq_proc);
	proc = rqstp->rq_procinfo;

	/* Check whether we have this call in the cache. */
	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
	case RC_INTR:
	case RC_DROPIT:
		return 0;
	case RC_REPLY:
		return 1;
	case RC_DOIT:;
		/* do it */
	}

	/* Decode arguments */
	xdr = proc->pc_decode;
	if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
			rqstp->rq_argp)) {
		dprintk("nfsd: failed to decode arguments!\n");
		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
		*statp = rpc_garbage_args;
		return 1;
	}

	/* need to grab the location to store the status, as
	 * nfsv4 does some encoding while processing
	 */
	nfserrp = rqstp->rq_res.head[0].iov_base
		+ rqstp->rq_res.head[0].iov_len;
	rqstp->rq_res.head[0].iov_len += sizeof(__be32);

	/* Now call the procedure handler, and encode NFS status. */
	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
	if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2)
		nfserr = nfserr_dropit;
	if (nfserr == nfserr_dropit) {
		dprintk("nfsd: Dropping request due to malloc failure!\n");
		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
		return 0;
	}

	if (rqstp->rq_proc != 0)
		*nfserrp++ = nfserr;

	/* Encode result.
	 * For NFSv2, additional info is never returned in case of an error.
	 */
	if (!(nfserr && rqstp->rq_vers == 2)) {
		xdr = proc->pc_encode;
		if (xdr && !xdr(rqstp, nfserrp,
				rqstp->rq_resp)) {
			/* Failed to encode result. Release cache entry */
			dprintk("nfsd: failed to encode result!\n");
			nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
			*statp = rpc_system_err;
			return 1;
		}
	}

	/* Store reply in cache. */
	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
	return 1;
}