xref: /openbmc/linux/fs/nfsd/filecache.c (revision d63670d2)
1 /*
2  * Open file cache.
3  *
4  * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
5  */
6 
7 #include <linux/hash.h>
8 #include <linux/slab.h>
9 #include <linux/file.h>
10 #include <linux/pagemap.h>
11 #include <linux/sched.h>
12 #include <linux/list_lru.h>
13 #include <linux/fsnotify_backend.h>
14 #include <linux/fsnotify.h>
15 #include <linux/seq_file.h>
16 
17 #include "vfs.h"
18 #include "nfsd.h"
19 #include "nfsfh.h"
20 #include "netns.h"
21 #include "filecache.h"
22 #include "trace.h"
23 
24 #define NFSDDBG_FACILITY	NFSDDBG_FH
25 
26 /* FIXME: dynamically size this for the machine somehow? */
27 #define NFSD_FILE_HASH_BITS                   12
28 #define NFSD_FILE_HASH_SIZE                  (1 << NFSD_FILE_HASH_BITS)
29 #define NFSD_LAUNDRETTE_DELAY		     (2 * HZ)
30 
31 #define NFSD_FILE_SHUTDOWN		     (1)
32 #define NFSD_FILE_LRU_THRESHOLD		     (4096UL)
33 #define NFSD_FILE_LRU_LIMIT		     (NFSD_FILE_LRU_THRESHOLD << 2)
34 
35 /* We only care about NFSD_MAY_READ/WRITE for this cache */
36 #define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)
37 
38 struct nfsd_fcache_bucket {
39 	struct hlist_head	nfb_head;
40 	spinlock_t		nfb_lock;
41 	unsigned int		nfb_count;
42 	unsigned int		nfb_maxcount;
43 };
44 
45 static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
46 
47 struct nfsd_fcache_disposal {
48 	struct work_struct work;
49 	spinlock_t lock;
50 	struct list_head freeme;
51 };
52 
53 static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
54 
55 static struct kmem_cache		*nfsd_file_slab;
56 static struct kmem_cache		*nfsd_file_mark_slab;
57 static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
58 static struct list_lru			nfsd_file_lru;
59 static long				nfsd_file_lru_flags;
60 static struct fsnotify_group		*nfsd_file_fsnotify_group;
61 static atomic_long_t			nfsd_filecache_count;
62 static struct delayed_work		nfsd_filecache_laundrette;
63 
64 static void nfsd_file_gc(void);
65 
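/*
 * Kick off (or re-arm) the delayed garbage collection work, unless the
 * cache is empty or is being shut down.
 */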
66 static void
67 nfsd_file_schedule_laundrette(void)
68 {
69 	long count = atomic_long_read(&nfsd_filecache_count);
70 
71 	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
72 		return;
73 
74 	queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
75 			NFSD_LAUNDRETTE_DELAY);
76 }
77 
78 static void
79 nfsd_file_slab_free(struct rcu_head *rcu)
80 {
81 	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);
82 
83 	put_cred(nf->nf_cred);
84 	kmem_cache_free(nfsd_file_slab, nf);
85 }
86 
87 static void
88 nfsd_file_mark_free(struct fsnotify_mark *mark)
89 {
90 	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
91 						  nfm_mark);
92 
93 	kmem_cache_free(nfsd_file_mark_slab, nfm);
94 }
95 
96 static struct nfsd_file_mark *
97 nfsd_file_mark_get(struct nfsd_file_mark *nfm)
98 {
99 	if (!refcount_inc_not_zero(&nfm->nfm_ref))
100 		return NULL;
101 	return nfm;
102 }
103 
104 static void
105 nfsd_file_mark_put(struct nfsd_file_mark *nfm)
106 {
107 	if (refcount_dec_and_test(&nfm->nfm_ref)) {
108 		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
109 		fsnotify_put_mark(&nfm->nfm_mark);
110 	}
111 }
112 
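/*
 * Find the fsnotify mark for this inode and take a reference to it, or
 * allocate and attach a new one if none is present. Retries if it races
 * with another task adding or tearing down a mark on the same inode.
 * Returns NULL on allocation failure.
 */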
113 static struct nfsd_file_mark *
114 nfsd_file_mark_find_or_create(struct nfsd_file *nf)
115 {
116 	int			err;
117 	struct fsnotify_mark	*mark;
118 	struct nfsd_file_mark	*nfm = NULL, *new;
119 	struct inode *inode = nf->nf_inode;
120 
121 	do {
122 		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
123 		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
124 				nfsd_file_fsnotify_group);
125 		if (mark) {
126 			nfm = nfsd_file_mark_get(container_of(mark,
127 						 struct nfsd_file_mark,
128 						 nfm_mark));
129 			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
130 			if (nfm) {
131 				fsnotify_put_mark(mark);
132 				break;
133 			}
134 			/* Avoid soft lockup race with nfsd_file_mark_put() */
135 			fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
136 			fsnotify_put_mark(mark);
137 		} else
138 			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
139 
140 		/* allocate a new nfm */
141 		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
142 		if (!new)
143 			return NULL;
144 		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
145 		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
146 		refcount_set(&new->nfm_ref, 1);
147 
148 		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
149 
150 		/*
151 		 * If the add was successful, then return the object.
152 		 * Otherwise, we need to put the reference we hold on the
153 		 * nfm_mark. The fsnotify code will take a reference and put
154 		 * it on failure, so we can't just free it directly. It's also
155 		 * not safe to call fsnotify_destroy_mark on it as the
156 		 * mark->group will be NULL. Thus, we can't let the nfm_ref
157 		 * counter drive the destruction at this point.
158 		 */
159 		if (likely(!err))
160 			nfm = new;
161 		else
162 			fsnotify_put_mark(&new->nfm_mark);
163 	} while (unlikely(err == -EEXIST));
164 
165 	return nfm;
166 }
167 
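/*
 * Allocate and initialize a new nfsd_file. The caller's credentials are
 * pinned and nf_ref starts at 1. No struct file is opened here; that
 * happens later in nfsd_file_acquire().
 */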
168 static struct nfsd_file *
169 nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
170 		struct net *net)
171 {
172 	struct nfsd_file *nf;
173 
174 	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
175 	if (nf) {
176 		INIT_HLIST_NODE(&nf->nf_node);
177 		INIT_LIST_HEAD(&nf->nf_lru);
178 		nf->nf_file = NULL;
179 		nf->nf_cred = get_current_cred();
180 		nf->nf_net = net;
181 		nf->nf_flags = 0;
182 		nf->nf_inode = inode;
183 		nf->nf_hashval = hashval;
184 		refcount_set(&nf->nf_ref, 1);
185 		nf->nf_may = may & NFSD_FILE_MAY_MASK;
186 		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
187 			if (may & NFSD_MAY_WRITE)
188 				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
189 			if (may & NFSD_MAY_READ)
190 				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
191 		}
192 		nf->nf_mark = NULL;
193 		trace_nfsd_file_alloc(nf);
194 	}
195 	return nf;
196 }
197 
198 static bool
199 nfsd_file_free(struct nfsd_file *nf)
200 {
201 	bool flush = false;
202 
203 	trace_nfsd_file_put_final(nf);
204 	if (nf->nf_mark)
205 		nfsd_file_mark_put(nf->nf_mark);
206 	if (nf->nf_file) {
207 		get_file(nf->nf_file);
208 		filp_close(nf->nf_file, NULL);
209 		fput(nf->nf_file);
210 		flush = true;
211 	}
212 	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
213 	return flush;
214 }
215 
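/*
 * Return true if the file was opened for write and still has dirty or
 * writeback pages in its mapping.
 */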
216 static bool
217 nfsd_file_check_writeback(struct nfsd_file *nf)
218 {
219 	struct file *file = nf->nf_file;
220 	struct address_space *mapping;
221 
222 	if (!file || !(file->f_mode & FMODE_WRITE))
223 		return false;
224 	mapping = file->f_mapping;
225 	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
226 		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
227 }
228 
229 static int
230 nfsd_file_check_write_error(struct nfsd_file *nf)
231 {
232 	struct file *file = nf->nf_file;
233 
234 	if (!file || !(file->f_mode & FMODE_WRITE))
235 		return 0;
236 	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
237 }
238 
239 static void
240 nfsd_file_do_unhash(struct nfsd_file *nf)
241 {
242 	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
243 
244 	trace_nfsd_file_unhash(nf);
245 
246 	if (nfsd_file_check_write_error(nf))
247 		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
248 	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
249 	hlist_del_rcu(&nf->nf_node);
250 	atomic_long_dec(&nfsd_filecache_count);
251 }
252 
253 static bool
254 nfsd_file_unhash(struct nfsd_file *nf)
255 {
256 	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
257 		nfsd_file_do_unhash(nf);
258 		if (!list_empty(&nf->nf_lru))
259 			list_lru_del(&nfsd_file_lru, &nf->nf_lru);
260 		return true;
261 	}
262 	return false;
263 }
264 
265 /*
266  * Return true if the file was unhashed.
267  */
268 static bool
269 nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
270 {
271 	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
272 
273 	trace_nfsd_file_unhash_and_release_locked(nf);
274 	if (!nfsd_file_unhash(nf))
275 		return false;
276 	/* keep final reference for nfsd_file_lru_dispose */
277 	/* keep the final reference for the dispose list processing */
278 		return true;
279 
280 	list_add(&nf->nf_lru, dispose);
281 	return true;
282 }
283 
284 static void
285 nfsd_file_put_noref(struct nfsd_file *nf)
286 {
287 	trace_nfsd_file_put(nf);
288 
289 	if (refcount_dec_and_test(&nf->nf_ref)) {
290 		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
291 		nfsd_file_free(nf);
292 	}
293 }
294 
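/**
 * nfsd_file_put - release a reference to an nfsd_file
 * @nf: nfsd_file to release
 *
 * Mark the file as recently used and drop the caller's reference. When
 * this appears to be the last user of an open file, dirty pages are
 * flushed, the laundrette may be scheduled, and garbage collection runs
 * if the cache has reached NFSD_FILE_LRU_LIMIT.
 */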
295 void
296 nfsd_file_put(struct nfsd_file *nf)
297 {
298 	bool is_hashed;
299 
300 	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
301 	if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
302 		nfsd_file_put_noref(nf);
303 		return;
304 	}
305 
306 	filemap_flush(nf->nf_file->f_mapping);
307 	is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
308 	nfsd_file_put_noref(nf);
309 	if (is_hashed)
310 		nfsd_file_schedule_laundrette();
311 	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
312 		nfsd_file_gc();
313 }
314 
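/**
 * nfsd_file_get - take an extra reference to an nfsd_file
 * @nf: nfsd_file to reference
 *
 * Returns @nf on success, or NULL if the refcount has already dropped
 * to zero.
 */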
315 struct nfsd_file *
316 nfsd_file_get(struct nfsd_file *nf)
317 {
318 	if (likely(refcount_inc_not_zero(&nf->nf_ref)))
319 		return nf;
320 	return NULL;
321 }
322 
323 static void
324 nfsd_file_dispose_list(struct list_head *dispose)
325 {
326 	struct nfsd_file *nf;
327 
328 	while (!list_empty(dispose)) {
329 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
330 		list_del(&nf->nf_lru);
331 		nfsd_file_put_noref(nf);
332 	}
333 }
334 
335 static void
336 nfsd_file_dispose_list_sync(struct list_head *dispose)
337 {
338 	bool flush = false;
339 	struct nfsd_file *nf;
340 
341 	while (!list_empty(dispose)) {
342 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
343 		list_del(&nf->nf_lru);
344 		if (!refcount_dec_and_test(&nf->nf_ref))
345 			continue;
346 		if (nfsd_file_free(nf))
347 			flush = true;
348 	}
349 	if (flush)
350 		flush_delayed_fput();
351 }
352 
353 static void
354 nfsd_file_list_remove_disposal(struct list_head *dst,
355 		struct nfsd_fcache_disposal *l)
356 {
357 	spin_lock(&l->lock);
358 	list_splice_init(&l->freeme, dst);
359 	spin_unlock(&l->lock);
360 }
361 
362 static void
363 nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
364 {
365 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
366 	struct nfsd_fcache_disposal *l = nn->fcache_disposal;
367 
368 	spin_lock(&l->lock);
369 	list_splice_tail_init(files, &l->freeme);
370 	spin_unlock(&l->lock);
371 	queue_work(nfsd_filecache_wq, &l->work);
372 }
373 
374 static void
375 nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
376 		struct net *net)
377 {
378 	struct nfsd_file *nf, *tmp;
379 
380 	list_for_each_entry_safe(nf, tmp, src, nf_lru) {
381 		if (nf->nf_net == net)
382 			list_move_tail(&nf->nf_lru, dst);
383 	}
384 }
385 
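/*
 * Move each entry on @dispose to the per-net disposal list it belongs to
 * and queue the corresponding work so the final puts happen in workqueue
 * context.
 */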
386 static void
387 nfsd_file_dispose_list_delayed(struct list_head *dispose)
388 {
389 	LIST_HEAD(list);
390 	struct nfsd_file *nf;
391 
392 	while (!list_empty(dispose)) {
393 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
394 		nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
395 		nfsd_file_list_add_disposal(&list, nf->nf_net);
396 	}
397 }
398 
399 /*
400  * Note this can deadlock with nfsd_file_cache_purge.
401  */
402 static enum lru_status
403 nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
404 		 spinlock_t *lock, void *arg)
405 	__releases(lock)
406 	__acquires(lock)
407 {
408 	struct list_head *head = arg;
409 	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
410 
411 	/*
412 	 * Do a lockless refcount check. The hashtable holds one reference, so
413 	 * we look to see if anything else has a reference, or if the entry
414 	 * has been put (i.e. used) since the shrinker last ran. Entries in
415 	 * either case are skipped; they don't get unhashed and released here.
416 	 *
417 	 * Note that in the put path, we set the flag and then decrement the
418 	 * counter. Here we check the counter and then test and clear the flag.
419 	 * That order is deliberate to ensure that we can do this locklessly.
420 	 */
421 	if (refcount_read(&nf->nf_ref) > 1)
422 		goto out_skip;
423 
424 	/*
425 	 * Don't throw out files that are still undergoing I/O or
426 	 * that have uncleared errors pending.
427 	 */
428 	if (nfsd_file_check_writeback(nf))
429 		goto out_skip;
430 
431 	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
432 		goto out_skip;
433 
434 	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
435 		goto out_skip;
436 
437 	list_lru_isolate_move(lru, &nf->nf_lru, head);
438 	return LRU_REMOVED;
439 out_skip:
440 	return LRU_SKIP;
441 }
442 
443 static unsigned long
444 nfsd_file_lru_walk_list(struct shrink_control *sc)
445 {
446 	LIST_HEAD(head);
447 	struct nfsd_file *nf;
448 	unsigned long ret;
449 
450 	if (sc)
451 		ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
452 				nfsd_file_lru_cb, &head);
453 	else
454 		ret = list_lru_walk(&nfsd_file_lru,
455 				nfsd_file_lru_cb,
456 				&head, LONG_MAX);
457 	list_for_each_entry(nf, &head, nf_lru) {
458 		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
459 		nfsd_file_do_unhash(nf);
460 		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
461 	}
462 	nfsd_file_dispose_list_delayed(&head);
463 	return ret;
464 }
465 
466 static void
467 nfsd_file_gc(void)
468 {
469 	nfsd_file_lru_walk_list(NULL);
470 }
471 
472 static void
473 nfsd_file_gc_worker(struct work_struct *work)
474 {
475 	nfsd_file_gc();
476 	nfsd_file_schedule_laundrette();
477 }
478 
479 static unsigned long
480 nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
481 {
482 	return list_lru_count(&nfsd_file_lru);
483 }
484 
485 static unsigned long
486 nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
487 {
488 	return nfsd_file_lru_walk_list(sc);
489 }
490 
491 static struct shrinker	nfsd_file_shrinker = {
492 	.scan_objects = nfsd_file_lru_scan,
493 	.count_objects = nfsd_file_lru_count,
494 	.seeks = 1,
495 };
496 
497 static void
498 __nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
499 			struct list_head *dispose)
500 {
501 	struct nfsd_file	*nf;
502 	struct hlist_node	*tmp;
503 
504 	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
505 	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
506 		if (inode == nf->nf_inode)
507 			nfsd_file_unhash_and_release_locked(nf, dispose);
508 	}
509 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
510 }
511 
512 /**
513  * nfsd_file_close_inode_sync - attempt to forcibly close an nfsd_file
514  * @inode: inode of the file to attempt to remove
515  *
516  * Walk the whole hash bucket, looking for any files that correspond to "inode".
517  * If any do, then unhash them and put the hashtable reference to them and
518  * destroy any that had their last reference put. Also ensure that any
519  * resulting fputs have their final __fput completed.
520  */
521 void
522 nfsd_file_close_inode_sync(struct inode *inode)
523 {
524 	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
525 						NFSD_FILE_HASH_BITS);
526 	LIST_HEAD(dispose);
527 
528 	__nfsd_file_close_inode(inode, hashval, &dispose);
529 	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
530 	nfsd_file_dispose_list_sync(&dispose);
531 }
532 
533 /**
534  * nfsd_file_close_inode - attempt a delayed close of an nfsd_file
535  * @inode: inode of the file to attempt to remove
536  *
537  * Walk the whole hash bucket, looking for any files that correspond to "inode".
538  * If any do, then unhash them and put the hashtable reference to them and
539  * destroy any that had their last reference put.
540  */
541 static void
542 nfsd_file_close_inode(struct inode *inode)
543 {
544 	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
545 						NFSD_FILE_HASH_BITS);
546 	LIST_HEAD(dispose);
547 
548 	__nfsd_file_close_inode(inode, hashval, &dispose);
549 	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
550 	nfsd_file_dispose_list_delayed(&dispose);
551 }
552 
553 /**
554  * nfsd_file_delayed_close - close unused nfsd_files
555  * @work: the work_struct embedded in an nfsd_fcache_disposal
556  *
557  * Splice off this namespace's deferred-disposal list and put the
558  * reference held on each nfsd_file queued there.
559  *
560  * Note this can deadlock with nfsd_file_cache_purge.
561  */
562 static void
563 nfsd_file_delayed_close(struct work_struct *work)
564 {
565 	LIST_HEAD(head);
566 	struct nfsd_fcache_disposal *l = container_of(work,
567 			struct nfsd_fcache_disposal, work);
568 
569 	nfsd_file_list_remove_disposal(&head, l);
570 	nfsd_file_dispose_list(&head);
571 }
572 
573 static int
574 nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
575 			    void *data)
576 {
577 	struct file_lock *fl = data;
578 
579 	/* Only close files for F_SETLEASE leases */
580 	if (fl->fl_flags & FL_LEASE)
581 		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
582 	return 0;
583 }
584 
585 static struct notifier_block nfsd_file_lease_notifier = {
586 	.notifier_call = nfsd_file_lease_notifier_call,
587 };
588 
589 static int
590 nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask,
591 				struct inode *inode, struct inode *dir,
592 				const struct qstr *name, u32 cookie)
593 {
594 	if (WARN_ON_ONCE(!inode))
595 		return 0;
596 
597 	trace_nfsd_file_fsnotify_handle_event(inode, mask);
598 
599 	/* Should be no marks on non-regular files */
600 	if (!S_ISREG(inode->i_mode)) {
601 		WARN_ON_ONCE(1);
602 		return 0;
603 	}
604 
605 	/* don't close files if this was not the last link */
606 	if (mask & FS_ATTRIB) {
607 		if (inode->i_nlink)
608 			return 0;
609 	}
610 
611 	nfsd_file_close_inode(inode);
612 	return 0;
613 }
614 
615 
616 static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
617 	.handle_inode_event = nfsd_file_fsnotify_handle_event,
618 	.free_mark = nfsd_file_mark_free,
619 };
620 
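/*
 * Set up the global file cache: the hash table, slab caches, LRU list,
 * shrinker, lease notifier and fsnotify group. Safe to call more than
 * once; subsequent calls return 0 without doing anything.
 */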
621 int
622 nfsd_file_cache_init(void)
623 {
624 	int		ret = -ENOMEM;
625 	unsigned int	i;
626 
627 	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
628 
629 	if (nfsd_file_hashtbl)
630 		return 0;
631 
632 	nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
633 	if (!nfsd_filecache_wq)
634 		goto out;
635 
636 	nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE,
637 				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
638 	if (!nfsd_file_hashtbl) {
639 		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
640 		goto out_err;
641 	}
642 
643 	nfsd_file_slab = kmem_cache_create("nfsd_file",
644 				sizeof(struct nfsd_file), 0, 0, NULL);
645 	if (!nfsd_file_slab) {
646 		pr_err("nfsd: unable to create nfsd_file_slab\n");
647 		goto out_err;
648 	}
649 
650 	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
651 					sizeof(struct nfsd_file_mark), 0, 0, NULL);
652 	if (!nfsd_file_mark_slab) {
653 		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
654 		goto out_err;
655 	}
656 
657 
658 	ret = list_lru_init(&nfsd_file_lru);
659 	if (ret) {
660 		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
661 		goto out_err;
662 	}
663 
664 	ret = register_shrinker(&nfsd_file_shrinker);
665 	if (ret) {
666 		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
667 		goto out_lru;
668 	}
669 
670 	ret = lease_register_notifier(&nfsd_file_lease_notifier);
671 	if (ret) {
672 		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
673 		goto out_shrinker;
674 	}
675 
676 	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
677 	if (IS_ERR(nfsd_file_fsnotify_group)) {
678 		pr_err("nfsd: unable to create fsnotify group: %ld\n",
679 			PTR_ERR(nfsd_file_fsnotify_group));
680 		ret = PTR_ERR(nfsd_file_fsnotify_group);
681 		nfsd_file_fsnotify_group = NULL;
682 		goto out_notifier;
683 	}
684 
685 	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
686 		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
687 		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
688 	}
689 
690 	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
691 out:
692 	return ret;
693 out_notifier:
694 	lease_unregister_notifier(&nfsd_file_lease_notifier);
695 out_shrinker:
696 	unregister_shrinker(&nfsd_file_shrinker);
697 out_lru:
698 	list_lru_destroy(&nfsd_file_lru);
699 out_err:
700 	kmem_cache_destroy(nfsd_file_slab);
701 	nfsd_file_slab = NULL;
702 	kmem_cache_destroy(nfsd_file_mark_slab);
703 	nfsd_file_mark_slab = NULL;
704 	kvfree(nfsd_file_hashtbl);
705 	nfsd_file_hashtbl = NULL;
706 	destroy_workqueue(nfsd_filecache_wq);
707 	nfsd_filecache_wq = NULL;
708 	goto out;
709 }
710 
711 /*
712  * Note this can deadlock with nfsd_file_lru_cb.
713  */
714 void
715 nfsd_file_cache_purge(struct net *net)
716 {
717 	unsigned int		i;
718 	struct nfsd_file	*nf;
719 	struct hlist_node	*next;
720 	LIST_HEAD(dispose);
721 	bool del;
722 
723 	if (!nfsd_file_hashtbl)
724 		return;
725 
726 	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
727 		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];
728 
729 		spin_lock(&nfb->nfb_lock);
730 		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
731 			if (net && nf->nf_net != net)
732 				continue;
733 			del = nfsd_file_unhash_and_release_locked(nf, &dispose);
734 
735 			/*
736 			 * Deadlock detected! Something marked this entry as
737 			 * unhashed, but hasn't removed it from the hash list.
738 			 */
739 			WARN_ON_ONCE(!del);
740 		}
741 		spin_unlock(&nfb->nfb_lock);
742 		nfsd_file_dispose_list(&dispose);
743 	}
744 }
745 
746 static struct nfsd_fcache_disposal *
747 nfsd_alloc_fcache_disposal(void)
748 {
749 	struct nfsd_fcache_disposal *l;
750 
751 	l = kmalloc(sizeof(*l), GFP_KERNEL);
752 	if (!l)
753 		return NULL;
754 	INIT_WORK(&l->work, nfsd_file_delayed_close);
755 	spin_lock_init(&l->lock);
756 	INIT_LIST_HEAD(&l->freeme);
757 	return l;
758 }
759 
760 static void
761 nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
762 {
763 	cancel_work_sync(&l->work);
764 	nfsd_file_dispose_list(&l->freeme);
765 	kfree(l);
766 }
767 
768 static void
769 nfsd_free_fcache_disposal_net(struct net *net)
770 {
771 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
772 	struct nfsd_fcache_disposal *l = nn->fcache_disposal;
773 
774 	nfsd_free_fcache_disposal(l);
775 }
776 
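/*
 * Per-netns setup: allocate the disposal structure used for deferred
 * closes in this namespace. Returns 0 on success or -ENOMEM.
 */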
777 int
778 nfsd_file_cache_start_net(struct net *net)
779 {
780 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
781 
782 	nn->fcache_disposal = nfsd_alloc_fcache_disposal();
783 	return nn->fcache_disposal ? 0 : -ENOMEM;
784 }
785 
786 void
787 nfsd_file_cache_shutdown_net(struct net *net)
788 {
789 	nfsd_file_cache_purge(net);
790 	nfsd_free_fcache_disposal_net(net);
791 }
792 
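/*
 * Tear down the global file cache. The laundrette is cancelled and the
 * whole cache purged before the LRU, fsnotify group, slabs, hash table
 * and workqueue are freed.
 */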
793 void
794 nfsd_file_cache_shutdown(void)
795 {
796 	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
797 
798 	lease_unregister_notifier(&nfsd_file_lease_notifier);
799 	unregister_shrinker(&nfsd_file_shrinker);
800 	/*
801 	 * make sure all callers of nfsd_file_lru_cb are done before
802 	 * calling nfsd_file_cache_purge
803 	 */
804 	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
805 	nfsd_file_cache_purge(NULL);
806 	list_lru_destroy(&nfsd_file_lru);
807 	rcu_barrier();
808 	fsnotify_put_group(nfsd_file_fsnotify_group);
809 	nfsd_file_fsnotify_group = NULL;
810 	kmem_cache_destroy(nfsd_file_slab);
811 	nfsd_file_slab = NULL;
812 	fsnotify_wait_marks_destroyed();
813 	kmem_cache_destroy(nfsd_file_mark_slab);
814 	nfsd_file_mark_slab = NULL;
815 	kvfree(nfsd_file_hashtbl);
816 	nfsd_file_hashtbl = NULL;
817 	destroy_workqueue(nfsd_filecache_wq);
818 	nfsd_filecache_wq = NULL;
819 }
820 
821 static bool
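/*
 * Compare the fsuid, fsgid and supplementary groups of two credentials.
 * Used to ensure that a cached open is only reused by a matching user.
 */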
822 nfsd_match_cred(const struct cred *c1, const struct cred *c2)
823 {
824 	int i;
825 
826 	if (!uid_eq(c1->fsuid, c2->fsuid))
827 		return false;
828 	if (!gid_eq(c1->fsgid, c2->fsgid))
829 		return false;
830 	if (c1->group_info == NULL || c2->group_info == NULL)
831 		return c1->group_info == c2->group_info;
832 	if (c1->group_info->ngroups != c2->group_info->ngroups)
833 		return false;
834 	for (i = 0; i < c1->group_info->ngroups; i++) {
835 		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
836 			return false;
837 	}
838 	return true;
839 }
840 
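/*
 * Search one hash chain for an entry matching the inode, access mode,
 * net namespace and credentials of the caller. Runs under RCU or with
 * the bucket lock held; returns a referenced nfsd_file or NULL.
 */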
841 static struct nfsd_file *
842 nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
843 			unsigned int hashval, struct net *net)
844 {
845 	struct nfsd_file *nf;
846 	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
847 
848 	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
849 				 nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
850 		if (nf->nf_may != need)
851 			continue;
852 		if (nf->nf_inode != inode)
853 			continue;
854 		if (nf->nf_net != net)
855 			continue;
856 		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
857 			continue;
858 		if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
859 			continue;
860 		if (nfsd_file_get(nf) != NULL)
861 			return nf;
862 	}
863 	return NULL;
864 }
865 
866 /**
867  * nfsd_file_is_cached - are there any cached open files for this inode?
868  * @inode: inode of the file to check
869  *
870  * Scan the hashtable for open files that match this inode. Returns true if there
871  * are any, and false if not.
872  */
873 bool
874 nfsd_file_is_cached(struct inode *inode)
875 {
876 	bool			ret = false;
877 	struct nfsd_file	*nf;
878 	unsigned int		hashval;
879 
880 	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
881 
882 	rcu_read_lock();
883 	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
884 				 nf_node) {
885 		if (inode == nf->nf_inode) {
886 			ret = true;
887 			break;
888 		}
889 	}
890 	rcu_read_unlock();
891 	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
892 	return ret;
893 }
894 
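/**
 * nfsd_file_acquire - find or open a cached file for an nfsd request
 * @rqstp: the RPC request
 * @fhp: the file handle to open
 * @may_flags: NFSD_MAY_* access flags
 * @pnf: where to store the resulting nfsd_file on success
 *
 * Look up an existing nfsd_file for this inode, access mode, net
 * namespace and credential, or insert a new one and open the underlying
 * file. On success *@pnf holds a referenced nfsd_file; the caller must
 * release it with nfsd_file_put().
 */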
895 __be32
896 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
897 		  unsigned int may_flags, struct nfsd_file **pnf)
898 {
899 	__be32	status;
900 	struct net *net = SVC_NET(rqstp);
901 	struct nfsd_file *nf, *new;
902 	struct inode *inode;
903 	unsigned int hashval;
904 	bool retry = true;
905 
906 	/* FIXME: skip this if fh_dentry is already set? */
907 	status = fh_verify(rqstp, fhp, S_IFREG,
908 				may_flags|NFSD_MAY_OWNER_OVERRIDE);
909 	if (status != nfs_ok)
910 		return status;
911 
912 	inode = d_inode(fhp->fh_dentry);
913 	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
914 retry:
915 	rcu_read_lock();
916 	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
917 	rcu_read_unlock();
918 	if (nf)
919 		goto wait_for_construction;
920 
921 	new = nfsd_file_alloc(inode, may_flags, hashval, net);
922 	if (!new) {
923 		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
924 					NULL, nfserr_jukebox);
925 		return nfserr_jukebox;
926 	}
927 
928 	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
929 	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
930 	if (nf == NULL)
931 		goto open_file;
932 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
933 	nfsd_file_slab_free(&new->nf_rcu);
934 
935 wait_for_construction:
936 	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
937 
938 	/* Did construction of this file fail? */
939 	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
940 		if (!retry) {
941 			status = nfserr_jukebox;
942 			goto out;
943 		}
944 		retry = false;
945 		nfsd_file_put_noref(nf);
946 		goto retry;
947 	}
948 
949 	this_cpu_inc(nfsd_file_cache_hits);
950 
951 	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
952 		bool write = (may_flags & NFSD_MAY_WRITE);
953 
954 		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
955 		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
956 			status = nfserrno(nfsd_open_break_lease(
957 					file_inode(nf->nf_file), may_flags));
958 			if (status == nfs_ok) {
959 				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
960 				if (write)
961 					clear_bit(NFSD_FILE_BREAK_WRITE,
962 						  &nf->nf_flags);
963 			}
964 		}
965 	}
966 out:
967 	if (status == nfs_ok) {
968 		*pnf = nf;
969 	} else {
970 		nfsd_file_put(nf);
971 		nf = NULL;
972 	}
973 
974 	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
975 	return status;
976 open_file:
977 	nf = new;
978 	/* Take reference for the hashtable */
979 	refcount_inc(&nf->nf_ref);
980 	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
981 	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
982 	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
983 	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
984 	++nfsd_file_hashtbl[hashval].nfb_count;
985 	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
986 			nfsd_file_hashtbl[hashval].nfb_count);
987 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
988 	if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
989 		nfsd_file_gc();
990 
991 	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
992 	if (nf->nf_mark)
993 		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
994 				may_flags, &nf->nf_file);
995 	else
996 		status = nfserr_jukebox;
997 	/*
998 	 * If construction failed, or we raced with a call to unlink()
999 	 * then unhash.
1000 	 */
1001 	if (status != nfs_ok || inode->i_nlink == 0) {
1002 		bool do_free;
1003 		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
1004 		do_free = nfsd_file_unhash(nf);
1005 		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
1006 		if (do_free)
1007 			nfsd_file_put_noref(nf);
1008 	}
1009 	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
1010 	smp_mb__after_atomic();
1011 	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
1012 	goto out;
1013 }
1014 
1015 /*
1016  * Note that fields may be added, removed or reordered in the future. Programs
1017  * scraping this file for info should test the labels to ensure they're
1018  * getting the correct field.
1019  */
1020 static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
1021 {
1022 	unsigned int i, count = 0, longest = 0;
1023 	unsigned long hits = 0;
1024 
1025 	/*
1026 	 * No need for spinlocks here since we're not terribly interested in
1027 	 * accuracy. We do take the nfsd_mutex simply to ensure that we
1028 	 * don't end up racing with server shutdown.
1029 	 */
1030 	mutex_lock(&nfsd_mutex);
1031 	if (nfsd_file_hashtbl) {
1032 		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
1033 			count += nfsd_file_hashtbl[i].nfb_count;
1034 			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
1035 		}
1036 	}
1037 	mutex_unlock(&nfsd_mutex);
1038 
1039 	for_each_possible_cpu(i)
1040 		hits += per_cpu(nfsd_file_cache_hits, i);
1041 
1042 	seq_printf(m, "total entries: %u\n", count);
1043 	seq_printf(m, "longest chain: %u\n", longest);
1044 	seq_printf(m, "cache hits:    %lu\n", hits);
1045 	return 0;
1046 }
1047 
1048 int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
1049 {
1050 	return single_open(file, nfsd_file_cache_stats_show, NULL);
1051 }
1052