xref: /openbmc/linux/fs/nfsd/nfssvc.c (revision e8e0929d)
1 /*
2  * linux/fs/nfsd/nfssvc.c
3  *
4  * Central processing for nfsd.
5  *
6  * Authors:	Olaf Kirch (okir@monad.swb.de)
7  *
8  * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/time.h>
14 #include <linux/errno.h>
15 #include <linux/nfs.h>
16 #include <linux/in.h>
17 #include <linux/uio.h>
18 #include <linux/unistd.h>
19 #include <linux/slab.h>
20 #include <linux/smp.h>
21 #include <linux/freezer.h>
22 #include <linux/fs_struct.h>
23 #include <linux/kthread.h>
24 #include <linux/swap.h>
25 
26 #include <linux/sunrpc/types.h>
27 #include <linux/sunrpc/stats.h>
28 #include <linux/sunrpc/svc.h>
29 #include <linux/sunrpc/svcsock.h>
30 #include <linux/sunrpc/cache.h>
31 #include <linux/nfsd/nfsd.h>
32 #include <linux/nfsd/stats.h>
33 #include <linux/nfsd/cache.h>
34 #include <linux/nfsd/syscall.h>
35 #include <linux/lockd/bind.h>
36 #include <linux/nfsacl.h>
37 #include <linux/seq_file.h>
38 
39 #define NFSDDBG_FACILITY	NFSDDBG_SVC
40 
41 extern struct svc_program	nfsd_program;
42 static int			nfsd(void *vrqstp);
43 struct timeval			nfssvc_boot;
44 
45 /*
46  * nfsd_mutex protects nfsd_serv -- both the pointer itself and the members
47  * of the svc_serv struct. In particular, ->sv_nrthreads, and to some
48  * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt.
49  *
50  * If (outside the lock) nfsd_serv is non-NULL, then it must point to a
51  * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
52  * of nfsd threads must exist and each must be listed in ->sp_all_threads in
53  * each entry of ->sv_pools[].
54  *
55  * Transitions of the thread count between zero and non-zero are of particular
56  * interest since the svc_serv needs to be created and initialized at that
57  * point, or freed.
58  *
59  * Finally, the nfsd_mutex also protects some of the global variables that are
60  * accessed when nfsd starts and that are settable via the write_* routines in
61  * nfsctl.c. In particular:
62  *
63  *	user_recovery_dirname
64  *	user_lease_time
65  *	nfsd_versions
66  */
67 DEFINE_MUTEX(nfsd_mutex);
68 struct svc_serv 		*nfsd_serv;
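
/*
 * Illustrative sketch (not part of the original file): the usage pattern the
 * locking comment above implies, i.e. take nfsd_mutex, then test nfsd_serv.
 * nfsd_nrthreads() further down is the in-tree example of the same idiom.
 */
#if 0	/* example only, never built */
static int example_nfsd_is_running(void)
{
	int running;

	mutex_lock(&nfsd_mutex);
	running = (nfsd_serv != NULL);	/* implies ->sv_nrthreads > 0 */
	mutex_unlock(&nfsd_mutex);
	return running;
}
#endif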
69 
70 /*
71  * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used.
72  * nfsd_drc_max_mem limits the total amount of memory available for
73  * version 4.1 DRC caches.
74  * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage.
75  */
76 spinlock_t	nfsd_drc_lock;
77 unsigned int	nfsd_drc_max_mem;
78 unsigned int	nfsd_drc_mem_used;
79 
80 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
81 static struct svc_stat	nfsd_acl_svcstats;
82 static struct svc_version *	nfsd_acl_version[] = {
83 	[2] = &nfsd_acl_version2,
84 	[3] = &nfsd_acl_version3,
85 };
86 
87 #define NFSD_ACL_MINVERS            2
88 #define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)
89 static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];
90 
91 static struct svc_program	nfsd_acl_program = {
92 	.pg_prog		= NFS_ACL_PROGRAM,
93 	.pg_nvers		= NFSD_ACL_NRVERS,
94 	.pg_vers		= nfsd_acl_versions,
95 	.pg_name		= "nfsacl",
96 	.pg_class		= "nfsd",
97 	.pg_stats		= &nfsd_acl_svcstats,
98 	.pg_authenticate	= &svc_set_client,
99 };
100 
101 static struct svc_stat	nfsd_acl_svcstats = {
102 	.program	= &nfsd_acl_program,
103 };
104 #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
105 
106 static struct svc_version *	nfsd_version[] = {
107 	[2] = &nfsd_version2,
108 #if defined(CONFIG_NFSD_V3)
109 	[3] = &nfsd_version3,
110 #endif
111 #if defined(CONFIG_NFSD_V4)
112 	[4] = &nfsd_version4,
113 #endif
114 };
115 
116 #define NFSD_MINVERS    	2
117 #define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)
118 static struct svc_version *nfsd_versions[NFSD_NRVERS];
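
/*
 * nfsd_version[] above lists every NFS version compiled into this kernel;
 * nfsd_versions[] holds only the versions currently enabled.  nfsd_vers()
 * and nfsd_reset_versions() below copy entries from the former into the
 * latter (or clear them) under nfsd_mutex.
 */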
119 
120 struct svc_program		nfsd_program = {
121 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
122 	.pg_next		= &nfsd_acl_program,
123 #endif
124 	.pg_prog		= NFS_PROGRAM,		/* program number */
125 	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
126 	.pg_vers		= nfsd_versions,	/* version table */
127 	.pg_name		= "nfsd",		/* program name */
128 	.pg_class		= "nfsd",		/* authentication class */
129 	.pg_stats		= &nfsd_svcstats,	/* server statistics */
130 	.pg_authenticate	= &svc_set_client,	/* export authentication */
131 
132 };
133 
134 u32 nfsd_supported_minorversion;
135 
136 int nfsd_vers(int vers, enum vers_op change)
137 {
138 	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
139 		return -1;
140 	switch(change) {
141 	case NFSD_SET:
142 		nfsd_versions[vers] = nfsd_version[vers];
143 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
144 		if (vers < NFSD_ACL_NRVERS)
145 			nfsd_acl_versions[vers] = nfsd_acl_version[vers];
146 #endif
147 		break;
148 	case NFSD_CLEAR:
149 		nfsd_versions[vers] = NULL;
150 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
151 		if (vers < NFSD_ACL_NRVERS)
152 			nfsd_acl_versions[vers] = NULL;
153 #endif
154 		break;
155 	case NFSD_TEST:
156 		return nfsd_versions[vers] != NULL;
157 	case NFSD_AVAIL:
158 		return nfsd_version[vers] != NULL;
159 	}
160 	return 0;
161 }
162 
163 int nfsd_minorversion(u32 minorversion, enum vers_op change)
164 {
165 	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
166 		return -1;
167 	switch(change) {
168 	case NFSD_SET:
169 		nfsd_supported_minorversion = minorversion;
170 		break;
171 	case NFSD_CLEAR:
172 		if (minorversion == 0)
173 			return -1;
174 		nfsd_supported_minorversion = minorversion - 1;
175 		break;
176 	case NFSD_TEST:
177 		return minorversion <= nfsd_supported_minorversion;
178 	case NFSD_AVAIL:
179 		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION;
180 	}
181 	return 0;
182 }
183 
184 /*
185  * Maximum number of nfsd processes
186  */
187 #define	NFSD_MAXSERVS		8192
188 
189 int nfsd_nrthreads(void)
190 {
191 	int rv = 0;
192 	mutex_lock(&nfsd_mutex);
193 	if (nfsd_serv)
194 		rv = nfsd_serv->sv_nrthreads;
195 	mutex_unlock(&nfsd_mutex);
196 	return rv;
197 }
198 
199 static void nfsd_last_thread(struct svc_serv *serv)
200 {
201 	/* When last nfsd thread exits we need to do some clean-up */
202 	struct svc_xprt *xprt;
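	/*
	 * lockd_up() was called once for every permanent socket created
	 * (see nfsd_init_socks()), so balance it with one lockd_down()
	 * per entry on sv_permsocks.
	 */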
203 	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list)
204 		lockd_down();
205 	nfsd_serv = NULL;
206 	nfsd_racache_shutdown();
207 	nfs4_state_shutdown();
208 
209 	printk(KERN_WARNING "nfsd: last server has exited, flushing export "
210 			    "cache\n");
211 	nfsd_export_flush();
212 }
213 
214 void nfsd_reset_versions(void)
215 {
216 	int found_one = 0;
217 	int i;
218 
219 	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
220 		if (nfsd_program.pg_vers[i])
221 			found_one = 1;
222 	}
223 
224 	if (!found_one) {
225 		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
226 			nfsd_program.pg_vers[i] = nfsd_version[i];
227 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
228 		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
229 			nfsd_acl_program.pg_vers[i] =
230 				nfsd_acl_version[i];
231 #endif
232 	}
233 }
234 
235 /*
236  * Each session guarantees a negotiated per slot memory cache for replies
237  * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
238  * NFSv4.1 server might want to use more memory for a DRC than a machine
239  * with multiple services.
240  *
241  * Impose a hard limit on the number of pages for the DRC which varies
242  * according to the machine's free pages. This is of course only a default.
243  *
244  * For now this is a #defined shift which could be under admin control
245  * in the future.
246  */
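/*
 * Worked example: with NFSD_DRC_SIZE_SHIFT of 10 (below), the cap comes to
 * (nr_free_buffer_pages() >> 10) * PAGE_SIZE, i.e. 1/1024 of the memory held
 * in free buffer pages, roughly 1MB of DRC for every 1GB of such memory.
 */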
247 static void set_max_drc(void)
248 {
249 	#define NFSD_DRC_SIZE_SHIFT	10
250 	nfsd_drc_max_mem = (nr_free_buffer_pages()
251 					>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
252 	nfsd_drc_mem_used = 0;
253 	spin_lock_init(&nfsd_drc_lock);
254 	dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
255 }
256 
257 int nfsd_create_serv(void)
258 {
259 	int err = 0;
260 
261 	WARN_ON(!mutex_is_locked(&nfsd_mutex));
262 	if (nfsd_serv) {
263 		svc_get(nfsd_serv);
264 		return 0;
265 	}
266 	if (nfsd_max_blksize == 0) {
267 		/* choose a suitable default */
268 		struct sysinfo i;
269 		si_meminfo(&i);
270 		/* Aim for 1/4096 of memory per thread
271 		 * This gives 1MB on 4Gig machines
272 		 * But only uses 32K on 128M machines.
273 		 * Bottom out at 8K on 32M and smaller.
274 		 * Of course, this is only a default.
275 		 */
276 		nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
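		/*
		 * totalram is in pages; shifting by (PAGE_SHIFT - 12)
		 * re-expresses it as total memory / 4096 in bytes, so the
		 * loop below halves the block size until it fits the
		 * 1/4096-of-memory target described above, without ever
		 * dropping below the 8K floor.
		 */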
277 		i.totalram <<= PAGE_SHIFT - 12;
278 		while (nfsd_max_blksize > i.totalram &&
279 		       nfsd_max_blksize >= 8*1024*2)
280 			nfsd_max_blksize /= 2;
281 	}
282 
283 	nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
284 				      nfsd_last_thread, nfsd, THIS_MODULE);
285 	if (nfsd_serv == NULL)
286 		err = -ENOMEM;
287 	else
288 		set_max_drc();
289 
290 	do_gettimeofday(&nfssvc_boot);		/* record boot time */
291 	return err;
292 }
293 
294 static int nfsd_init_socks(int port)
295 {
296 	int error;
297 	if (!list_empty(&nfsd_serv->sv_permsocks))
298 		return 0;
299 
300 	error = svc_create_xprt(nfsd_serv, "udp", PF_INET, port,
301 					SVC_SOCK_DEFAULTS);
302 	if (error < 0)
303 		return error;
304 
305 	error = lockd_up();
306 	if (error < 0)
307 		return error;
308 
309 	error = svc_create_xprt(nfsd_serv, "tcp", PF_INET, port,
310 					SVC_SOCK_DEFAULTS);
311 	if (error < 0)
312 		return error;
313 
314 	error = lockd_up();
315 	if (error < 0)
316 		return error;
317 
318 	return 0;
319 }
320 
321 int nfsd_nrpools(void)
322 {
323 	if (nfsd_serv == NULL)
324 		return 0;
325 	else
326 		return nfsd_serv->sv_nrpools;
327 }
328 
329 int nfsd_get_nrthreads(int n, int *nthreads)
330 {
331 	int i = 0;
332 
333 	if (nfsd_serv != NULL) {
334 		for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++)
335 			nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads;
336 	}
337 
338 	return 0;
339 }
340 
341 int nfsd_set_nrthreads(int n, int *nthreads)
342 {
343 	int i = 0;
344 	int tot = 0;
345 	int err = 0;
346 
347 	WARN_ON(!mutex_is_locked(&nfsd_mutex));
348 
349 	if (nfsd_serv == NULL || n <= 0)
350 		return 0;
351 
352 	if (n > nfsd_serv->sv_nrpools)
353 		n = nfsd_serv->sv_nrpools;
354 
355 	/* enforce a global maximum number of threads */
356 	tot = 0;
357 	for (i = 0; i < n; i++) {
358 		if (nthreads[i] > NFSD_MAXSERVS)
359 			nthreads[i] = NFSD_MAXSERVS;
360 		tot += nthreads[i];
361 	}
362 	if (tot > NFSD_MAXSERVS) {
363 		/* total too large: scale down requested numbers */
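		/*
		 * First scale each pool's request down in proportion to the
		 * excess; integer division rounds down, so a second pass
		 * trims one further thread per pool while the running total
		 * is still positive.
		 */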
364 		for (i = 0; i < n && tot > 0; i++) {
365 		    	int new = nthreads[i] * NFSD_MAXSERVS / tot;
366 			tot -= (nthreads[i] - new);
367 			nthreads[i] = new;
368 		}
369 		for (i = 0; i < n && tot > 0; i++) {
370 			nthreads[i]--;
371 			tot--;
372 		}
373 	}
374 
375 	/*
376 	 * There must always be a thread in pool 0; the admin
377 	 * can't shut down NFS completely using pool_threads.
378 	 */
379 	if (nthreads[0] == 0)
380 		nthreads[0] = 1;
381 
382 	/* apply the new numbers */
383 	svc_get(nfsd_serv);
384 	for (i = 0; i < n; i++) {
385 		err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
386 				    	  nthreads[i]);
387 		if (err)
388 			break;
389 	}
390 	svc_destroy(nfsd_serv);
391 
392 	return err;
393 }
394 
395 int
396 nfsd_svc(unsigned short port, int nrservs)
397 {
398 	int	error;
399 
400 	mutex_lock(&nfsd_mutex);
401 	dprintk("nfsd: creating service\n");
402 	if (nrservs <= 0)
403 		nrservs = 0;
404 	if (nrservs > NFSD_MAXSERVS)
405 		nrservs = NFSD_MAXSERVS;
406 	error = 0;
407 	if (nrservs == 0 && nfsd_serv == NULL)
408 		goto out;
409 
410 	/* Readahead param cache - will no-op if it already exists */
411 	error = nfsd_racache_init(2*nrservs);
412 	if (error < 0)
413 		goto out;
414 	error = nfs4_state_start();
415 	if (error)
416 		goto out;
417 
418 	nfsd_reset_versions();
419 
420 	error = nfsd_create_serv();
421 
422 	if (error)
423 		goto out;
424 	error = nfsd_init_socks(port);
425 	if (error)
426 		goto failure;
427 
428 	error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
429 	if (error == 0)
430 		/* We are holding a reference to nfsd_serv which
431 		 * we don't want to count in the return value,
432 		 * so subtract 1
433 		 */
434 		error = nfsd_serv->sv_nrthreads - 1;
435  failure:
436 	svc_destroy(nfsd_serv);		/* Release server */
437  out:
438 	mutex_unlock(&nfsd_mutex);
439 	return error;
440 }
441 
442 
443 /*
444  * This is the NFS server kernel thread
445  */
446 static int
447 nfsd(void *vrqstp)
448 {
449 	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
450 	int err, preverr = 0;
451 
452 	/* Lock module and set up kernel thread */
453 	mutex_lock(&nfsd_mutex);
454 
455 	/* At this point, the thread shares current->fs
456 	 * with the init process. We need to create files with a
457 	 * umask of 0 instead of init's umask. */
458 	if (unshare_fs_struct() < 0) {
459 		printk("Unable to start nfsd thread: out of memory\n");
460 		goto out;
461 	}
462 
463 	current->fs->umask = 0;
464 
465 	/*
466 	 * thread is spawned with all signals set to SIG_IGN, re-enable
467 	 * the ones that will bring down the thread
468 	 */
469 	allow_signal(SIGKILL);
470 	allow_signal(SIGHUP);
471 	allow_signal(SIGINT);
472 	allow_signal(SIGQUIT);
473 
474 	nfsdstats.th_cnt++;
475 	mutex_unlock(&nfsd_mutex);
476 
477 	/*
478 	 * We want less throttling in balance_dirty_pages() so that NFS to
479 	 * localhost doesn't cause nfsd to lock up due to all the client's
480 	 * dirty pages.
481 	 */
482 	current->flags |= PF_LESS_THROTTLE;
483 	set_freezable();
484 
485 	/*
486 	 * The main request loop
487 	 */
488 	for (;;) {
489 		/*
490 		 * Find a socket with data available and call its
491 		 * recvfrom routine.
492 		 */
493 		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
494 			;
495 		if (err == -EINTR)
496 			break;
497 		else if (err < 0) {
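			/*
			 * Ratelimit: log only when the error code changes,
			 * then back off for about a second so a persistent
			 * failure cannot flood the log or spin this thread.
			 */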
498 			if (err != preverr) {
499 				printk(KERN_WARNING "%s: unexpected error "
500 					"from svc_recv (%d)\n", __func__, -err);
501 				preverr = err;
502 			}
503 			schedule_timeout_uninterruptible(HZ);
504 			continue;
505 		}
506 
507 
508 		/* Lock the export hash tables for reading. */
509 		exp_readlock();
510 
511 		validate_process_creds();
512 		svc_process(rqstp);
513 		validate_process_creds();
514 
515 		/* Unlock export hash tables */
516 		exp_readunlock();
517 	}
518 
519 	/* Clear signals before calling svc_exit_thread() */
520 	flush_signals(current);
521 
522 	mutex_lock(&nfsd_mutex);
523 	nfsdstats.th_cnt--;
524 
525 out:
526 	/* Release the thread */
527 	svc_exit_thread(rqstp);
528 
529 	/* Release module */
530 	mutex_unlock(&nfsd_mutex);
531 	module_put_and_exit(0);
532 	return 0;
533 }
534 
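/*
 * Map error codes that only newer protocol versions can express onto
 * something an older client understands: NFSv2 has no JUKEBOX error, so
 * the request is dropped and left for the client to retry; WRONGSEC only
 * exists in NFSv4, so earlier versions see plain EACCES instead.
 */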
535 static __be32 map_new_errors(u32 vers, __be32 nfserr)
536 {
537 	if (nfserr == nfserr_jukebox && vers == 2)
538 		return nfserr_dropit;
539 	if (nfserr == nfserr_wrongsec && vers < 4)
540 		return nfserr_acces;
541 	return nfserr;
542 }
543 
544 int
545 nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
546 {
547 	struct svc_procedure	*proc;
548 	kxdrproc_t		xdr;
549 	__be32			nfserr;
550 	__be32			*nfserrp;
551 
552 	dprintk("nfsd_dispatch: vers %d proc %d\n",
553 				rqstp->rq_vers, rqstp->rq_proc);
554 	proc = rqstp->rq_procinfo;
555 
556 	/* Check whether we have this call in the cache. */
557 	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
558 	case RC_INTR:
559 	case RC_DROPIT:
560 		return 0;
561 	case RC_REPLY:
562 		return 1;
563 	case RC_DOIT:;
564 		/* do it */
565 	}
566 
567 	/* Decode arguments */
568 	xdr = proc->pc_decode;
569 	if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
570 			rqstp->rq_argp)) {
571 		dprintk("nfsd: failed to decode arguments!\n");
572 		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
573 		*statp = rpc_garbage_args;
574 		return 1;
575 	}
576 
577 	/* Need to grab the location to store the status, as
578 	 * NFSv4 does some encoding while processing.
579 	 */
580 	nfserrp = rqstp->rq_res.head[0].iov_base
581 		+ rqstp->rq_res.head[0].iov_len;
582 	rqstp->rq_res.head[0].iov_len += sizeof(__be32);
583 
584 	/* Now call the procedure handler, and encode NFS status. */
585 	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
586 	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
587 	if (nfserr == nfserr_dropit) {
588 		dprintk("nfsd: Dropping request; may be revisited later\n");
589 		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
590 		return 0;
591 	}
592 
593 	if (rqstp->rq_proc != 0)
594 		*nfserrp++ = nfserr;
595 
596 	/* Encode result.
597 	 * For NFSv2, additional info is never returned in case of an error.
598 	 */
599 	if (!(nfserr && rqstp->rq_vers == 2)) {
600 		xdr = proc->pc_encode;
601 		if (xdr && !xdr(rqstp, nfserrp,
602 				rqstp->rq_resp)) {
603 			/* Failed to encode result. Release cache entry */
604 			dprintk("nfsd: failed to encode result!\n");
605 			nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
606 			*statp = rpc_system_err;
607 			return 1;
608 		}
609 	}
610 
611 	/* Store reply in cache. */
612 	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
613 	return 1;
614 }
615 
616 int nfsd_pool_stats_open(struct inode *inode, struct file *file)
617 {
618 	int ret;
619 	mutex_lock(&nfsd_mutex);
620 	if (nfsd_serv == NULL) {
621 		mutex_unlock(&nfsd_mutex);
622 		return -ENODEV;
623 	}
624 	/* bump up the pseudo refcount while traversing */
625 	svc_get(nfsd_serv);
626 	ret = svc_pool_stats_open(nfsd_serv, file);
627 	mutex_unlock(&nfsd_mutex);
628 	return ret;
629 }
630 
631 int nfsd_pool_stats_release(struct inode *inode, struct file *file)
632 {
633 	int ret = seq_release(inode, file);
634 	mutex_lock(&nfsd_mutex);
635 	/* this function really, really should have been called svc_put() */
636 	svc_destroy(nfsd_serv);
637 	mutex_unlock(&nfsd_mutex);
638 	return ret;
639 }
640