xref: /openbmc/linux/fs/nfsd/filecache.c (revision 5e113224c17e2fb156b785ddbbc48a0209fddb0c)
/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS		     12
#define NFSD_FILE_HASH_SIZE		     (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY		     (2 * HZ)

#define NFSD_FILE_LRU_RESCAN		     (0)
#define NFSD_FILE_SHUTDOWN		     (1)
#define NFSD_FILE_LRU_THRESHOLD	     (4096UL)
#define NFSD_FILE_LRU_LIMIT		     (NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)

struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;
	spinlock_t		nfb_lock;
	unsigned int		nfb_count;
	unsigned int		nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static long				nfsd_file_lru_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static atomic_long_t			nfsd_filecache_count;
static struct delayed_work		nfsd_filecache_laundrette;

enum nfsd_file_laundrette_ctl {
	NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
	NFSD_FILE_LAUNDRETTE_MAY_FLUSH
};

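/*
 * Kick off (or reschedule) the laundrette job. Scanning is scheduled
 * immediately once the cache holds more than NFSD_FILE_LRU_THRESHOLD
 * entries, and after NFSD_LAUNDRETTE_DELAY otherwise. When the caller
 * passes NFSD_FILE_LAUNDRETTE_MAY_FLUSH and the cache has grown past
 * NFSD_FILE_LRU_LIMIT, the work is flushed synchronously.
 */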
static void
nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
{
	long count = atomic_long_read(&nfsd_filecache_count);

	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
		return;

	/* Be more aggressive about scanning if over the threshold */
	if (count > NFSD_FILE_LRU_THRESHOLD)
		mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
	else
		schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);

	if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
		return;

	/* ...and don't delay flushing if we're out of control */
	if (count >= NFSD_FILE_LRU_LIMIT)
		flush_delayed_work(&nfsd_filecache_laundrette);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	put_cred(nf->nf_cred);
	kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!atomic_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (atomic_dec_and_test(&nfm->nfm_ref)) {
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
		fsnotify_put_mark(&nfm->nfm_mark);
	}
}

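/*
 * Find the fsnotify mark attached to this inode by our group, or hang a
 * new one on it. If fsnotify_add_inode_mark() reports that we lost a race
 * with another task attaching a mark (-EEXIST), retry the lookup.
 * Returns NULL on allocation failure.
 */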
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
	int			err;
	struct fsnotify_mark	*mark;
	struct nfsd_file_mark	*nfm = NULL, *new;
	struct inode *inode = nf->nf_inode;

	do {
		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
				nfsd_file_fsnotify_group);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
			fsnotify_put_mark(mark);
			if (likely(nfm))
				break;
		} else
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		atomic_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));

	return nfm;
}

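/*
 * Allocate and initialize an nfsd_file. The caller's current credentials
 * are pinned here so that nfsd_file_find_locked() can later match cache
 * entries per-user. Returns NULL on allocation failure.
 */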
static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
		struct net *net)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (nf) {
		INIT_HLIST_NODE(&nf->nf_node);
		INIT_LIST_HEAD(&nf->nf_lru);
		nf->nf_file = NULL;
		nf->nf_cred = get_current_cred();
		nf->nf_net = net;
		nf->nf_flags = 0;
		nf->nf_inode = inode;
		nf->nf_hashval = hashval;
		atomic_set(&nf->nf_ref, 1);
		nf->nf_may = may & NFSD_FILE_MAY_MASK;
		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may & NFSD_MAY_WRITE)
				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may & NFSD_MAY_READ)
				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}
		nf->nf_mark = NULL;
		trace_nfsd_file_alloc(nf);
	}
	return nf;
}

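/*
 * Tear down an nfsd_file whose refcount has dropped to zero. filp_close()
 * is used rather than a bare fput() so that the close gets a chance to
 * flush pending writes and release locks. The struct itself is freed via
 * call_rcu() so that lockless hash walkers remain safe. Returns true if
 * the caller should follow up with flush_delayed_fput().
 */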
static bool
nfsd_file_free(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, NULL);
		fput(nf->nf_file);
		flush = true;
	}
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}

static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);

	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
	hlist_del_rcu(&nf->nf_node);
	if (!list_empty(&nf->nf_lru))
		list_lru_del(&nfsd_file_lru, &nf->nf_lru);
	atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_do_unhash(nf);
		return true;
	}
	return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return false;
	/* keep final reference for nfsd_file_lru_dispose */
	if (atomic_add_unless(&nf->nf_ref, -1, 1))
		return true;

	list_add(&nf->nf_lru, dispose);
	return true;
}

static int
nfsd_file_put_noref(struct nfsd_file *nf)
{
	int count;

	trace_nfsd_file_put(nf);

	count = atomic_dec_return(&nf->nf_ref);
	if (!count) {
		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
		nfsd_file_free(nf);
	}
	return count;
}

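/*
 * Drop the caller's reference. The entry is flagged as recently referenced
 * so the next LRU pass gives it another chance. If only the hashtable's
 * reference remains afterward, kick the laundrette, which may flush
 * synchronously when the cache has grown too large.
 */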
void
nfsd_file_put(struct nfsd_file *nf)
{
	bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;

	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (nfsd_file_put_noref(nf) == 1 && is_hashed)
		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
		return nf;
	return NULL;
}

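/*
 * Release every entry on a dispose list. Entries were already unhashed by
 * the caller, so the reference dropped here is the one the hashtable used
 * to hold. The _sync variant also waits for any resulting delayed fputs.
 */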
static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		nfsd_file_put_noref(nf);
	}
}

static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		if (!atomic_dec_and_test(&nf->nf_ref))
			continue;
		if (nfsd_file_free(nf))
			flush = true;
	}
	if (flush)
		flush_delayed_fput();
}

/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
	__releases(lock)
	__acquires(lock)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (atomic_read(&nf->nf_ref) > 1)
		goto out_skip;
	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		goto out_rescan;

	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
		goto out_skip;

	list_lru_isolate_move(lru, &nf->nf_lru, head);
	return LRU_REMOVED;
out_rescan:
	set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
out_skip:
	return LRU_SKIP;
}

static void
nfsd_file_lru_dispose(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nfsd_file *nf = list_first_entry(head,
				struct nfsd_file, nf_lru);
		list_del_init(&nf->nf_lru);
		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_do_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_put_noref(nf);
	}
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	LIST_HEAD(head);
	unsigned long ret;

	ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
	nfsd_file_lru_dispose(&head);
	return ret;
}

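/*
 * Hook the LRU up to the generic shrinker so that memory pressure can
 * reap unused cache entries.
 */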
static struct shrinker	nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};

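/*
 * Unhash every entry in the given bucket that refers to @inode, collecting
 * them on @dispose for the caller to release.
 */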
static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file	*nf;
	struct hlist_node	*tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them, drop the hashtable reference to them, and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt to forcibly close an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them, drop the hashtable reference to them, and
 * destroy any that had their last reference put. Unlike the _sync variant,
 * this does not wait for the final __fput of the closed files.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: dummy
 *
 * Walk the LRU list and close any entries that have not been used since
 * the last scan.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
	LIST_HEAD(head);

	list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);

	if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);

	if (!list_empty(&head)) {
		nfsd_file_lru_dispose(&head);
		flush_delayed_fput();
	}
}

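/*
 * Close cached files for an inode whenever a F_SETLEASE request comes in
 * for it, so that an idle cached open doesn't stand in the way of the
 * lease being granted.
 */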
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			    void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};

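/*
 * fsnotify callback: close cached files for an inode on FS_DELETE_SELF,
 * or on FS_ATTRIB once the link count has dropped to zero, so that
 * unlinked files are actually released on the server.
 */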
static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				u32 mask, const void *data, int data_type,
				const struct qstr *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};

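/*
 * Set up the hashtable, slab caches, LRU, shrinker, lease notifier and
 * fsnotify group. Safe to call more than once: returns 0 immediately if
 * the cache is already initialized, or a negative errno on failure after
 * tearing everything down again.
 */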
int
nfsd_file_cache_init(void)
{
	int		ret = -ENOMEM;
	unsigned int	i;

	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	if (nfsd_file_hashtbl)
		return 0;

	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
					sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
out:
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
	unsigned int		i;
	struct nfsd_file	*nf;
	struct hlist_node	*next;
	LIST_HEAD(dispose);
	bool del;

	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

		spin_lock(&nfb->nfb_lock);
		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
			if (net && nf->nf_net != net)
				continue;
			del = nfsd_file_unhash_and_release_locked(nf, &dispose);

			/*
			 * Deadlock detected! Something marked this entry as
			 * unhashed, but hasn't removed it from the hash list.
			 */
			WARN_ON_ONCE(!del);
		}
		spin_unlock(&nfb->nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}

void
nfsd_file_cache_shutdown(void)
{
	LIST_HEAD(dispose);

	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	lease_unregister_notifier(&nfsd_file_lease_notifier);
	unregister_shrinker(&nfsd_file_shrinker);
	/*
	 * make sure all callers of nfsd_file_lru_cb are done before
	 * calling nfsd_file_cache_purge
	 */
	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
	nfsd_file_cache_purge(NULL);
	list_lru_destroy(&nfsd_file_lru);
	rcu_barrier();
	fsnotify_put_group(nfsd_file_fsnotify_group);
	nfsd_file_fsnotify_group = NULL;
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	fsnotify_wait_marks_destroyed();
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
}

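/*
 * Compare the fsuid, fsgid and supplementary groups of two credentials.
 * Cached files are only shared between users with identical creds.
 */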
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
	int i;

	if (!uid_eq(c1->fsuid, c2->fsuid))
		return false;
	if (!gid_eq(c1->fsgid, c2->fsgid))
		return false;
	if (c1->group_info == NULL || c2->group_info == NULL)
		return c1->group_info == c2->group_info;
	if (c1->group_info->ngroups != c2->group_info->ngroups)
		return false;
	for (i = 0; i < c1->group_info->ngroups; i++) {
		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
			return false;
	}
	return true;
}

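/*
 * Search one hash bucket for a usable entry: same inode and net namespace,
 * matching creds, and already open for at least the requested access.
 * Called with either the bucket lock or the RCU read lock held; a
 * reference is taken on any entry returned.
 */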
static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
			unsigned int hashval, struct net *net)
{
	struct nfsd_file *nf;
	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if ((need & nf->nf_may) != need)
			continue;
		if (nf->nf_inode != inode)
			continue;
		if (nf->nf_net != net)
			continue;
		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
			continue;
		if (nfsd_file_get(nf) != NULL)
			return nf;
	}
	return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this inode?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this inode. Returns true if
 * there are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
	bool			ret = false;
	struct nfsd_file	*nf;
	unsigned int		hashval;

	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

	rcu_read_lock();
	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if (inode == nf->nf_inode) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
	return ret;
}

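/*
 * nfsd_file_acquire - find or open a cached file for an nfs request
 *
 * Look up a matching cache entry, or open the file and hash a new one. On
 * success the caller owns a reference that must be released with
 * nfsd_file_put(). A typical caller looks something like this (an
 * illustrative sketch only, with error handling trimmed):
 *
 *	struct nfsd_file *nf;
 *	__be32 status;
 *
 *	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
 *	if (status != nfs_ok)
 *		return status;
 *	... do the actual I/O through nf->nf_file ...
 *	nfsd_file_put(nf);
 */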
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  unsigned int may_flags, struct nfsd_file **pnf)
{
	__be32	status;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_file *nf, *new;
	struct inode *inode;
	unsigned int hashval;

	/* FIXME: skip this if fh_dentry is already set? */
	status = fh_verify(rqstp, fhp, S_IFREG,
				may_flags|NFSD_MAY_OWNER_OVERRIDE);
	if (status != nfs_ok)
		return status;

	inode = d_inode(fhp->fh_dentry);
	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
	rcu_read_lock();
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	rcu_read_unlock();
	if (nf)
		goto wait_for_construction;

	new = nfsd_file_alloc(inode, may_flags, hashval, net);
	if (!new) {
		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
					NULL, nfserr_jukebox);
		return nfserr_jukebox;
	}

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	if (nf == NULL)
		goto open_file;
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nfsd_file_slab_free(&new->nf_rcu);

wait_for_construction:
	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

	/* Did construction of this file fail? */
	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_put_noref(nf);
		goto retry;
	}

	this_cpu_inc(nfsd_file_cache_hits);

	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
		bool write = (may_flags & NFSD_MAY_WRITE);

		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
			status = nfserrno(nfsd_open_break_lease(
					file_inode(nf->nf_file), may_flags));
			if (status == nfs_ok) {
				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
				if (write)
					clear_bit(NFSD_FILE_BREAK_WRITE,
						  &nf->nf_flags);
			}
		}
	}
out:
	if (status == nfs_ok) {
		*pnf = nf;
	} else {
		nfsd_file_put(nf);
		nf = NULL;
	}

	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
	return status;
open_file:
	nf = new;
	/* Take reference for the hashtable */
	atomic_inc(&nf->nf_ref);
	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
	++nfsd_file_hashtbl[hashval].nfb_count;
	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
			nfsd_file_hashtbl[hashval].nfb_count);
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	atomic_long_inc(&nfsd_filecache_count);

	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
	if (nf->nf_mark)
		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
				may_flags, &nf->nf_file);
	else
		status = nfserr_jukebox;
	/*
	 * If construction failed, or we raced with a call to unlink(),
	 * then unhash.
	 */
	if (status != nfs_ok || inode->i_nlink == 0) {
		bool do_free;

		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
		do_free = nfsd_file_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
		if (do_free)
			nfsd_file_put_noref(nf);
	}
	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
	smp_mb__after_atomic();
	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
	goto out;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
	unsigned int i, count = 0, longest = 0;
	unsigned long hits = 0;

	/*
	 * No need for spinlocks here since we're not terribly interested in
	 * accuracy. We do take the nfsd_mutex simply to ensure that we
	 * don't end up racing with server shutdown.
	 */
	mutex_lock(&nfsd_mutex);
	if (nfsd_file_hashtbl) {
		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
			count += nfsd_file_hashtbl[i].nfb_count;
			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
		}
	}
	mutex_unlock(&nfsd_mutex);

	for_each_possible_cpu(i)
		hits += per_cpu(nfsd_file_cache_hits, i);

	seq_printf(m, "total entries: %u\n", count);
	seq_printf(m, "longest chain: %u\n", longest);
	seq_printf(m, "cache hits:    %lu\n", hits);
	return 0;
}

int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_file_cache_stats_show, NULL);
}
894