xref: /openbmc/linux/fs/nfsd/filecache.c (revision 65294c1f2c5e72b15b76e16c8c8cfd9359fc9f6f)
1*65294c1fSJeff Layton /*
2*65294c1fSJeff Layton  * Open file cache.
3*65294c1fSJeff Layton  *
4*65294c1fSJeff Layton  * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
5*65294c1fSJeff Layton  */
6*65294c1fSJeff Layton 
7*65294c1fSJeff Layton #include <linux/hash.h>
8*65294c1fSJeff Layton #include <linux/slab.h>
9*65294c1fSJeff Layton #include <linux/hash.h>
10*65294c1fSJeff Layton #include <linux/file.h>
11*65294c1fSJeff Layton #include <linux/sched.h>
12*65294c1fSJeff Layton #include <linux/list_lru.h>
13*65294c1fSJeff Layton #include <linux/fsnotify_backend.h>
14*65294c1fSJeff Layton #include <linux/fsnotify.h>
15*65294c1fSJeff Layton #include <linux/seq_file.h>
16*65294c1fSJeff Layton 
17*65294c1fSJeff Layton #include "vfs.h"
18*65294c1fSJeff Layton #include "nfsd.h"
19*65294c1fSJeff Layton #include "nfsfh.h"
20*65294c1fSJeff Layton #include "filecache.h"
21*65294c1fSJeff Layton #include "trace.h"
22*65294c1fSJeff Layton 
#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS                   12
#define NFSD_FILE_HASH_SIZE                  (1 << NFSD_FILE_HASH_BITS)
/* delay before a scheduled laundrette run scans for idle files */
#define NFSD_LAUNDRETTE_DELAY		     (2 * HZ)

/* bit numbers for nfsd_file_lru_flags */
#define NFSD_FILE_LRU_RESCAN		     (0)
#define NFSD_FILE_SHUTDOWN		     (1)
/* above this many cached files, the laundrette is kicked immediately */
#define NFSD_FILE_LRU_THRESHOLD		     (4096UL)
/* above this many, nfsd_file_put may synchronously flush the laundrette */
#define NFSD_FILE_LRU_LIMIT		     (NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)
37*65294c1fSJeff Layton 
/*
 * One bucket of the open file cache hash table. Each bucket carries its
 * own lock so lookups in different buckets don't contend.
 */
struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;	/* chain of nfsd_files hashed here */
	spinlock_t		nfb_lock;	/* protects nfb_head and the counters */
	unsigned int		nfb_count;	/* current number of entries */
	unsigned int		nfb_maxcount;	/* high-water mark of nfb_count; presumably updated at insert (not visible in this chunk) */
};
44*65294c1fSJeff Layton 
/* per-cpu cache-hit counter; presumably bumped on lookup (not visible here) */
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

static struct kmem_cache		*nfsd_file_slab;	/* struct nfsd_file allocations */
static struct kmem_cache		*nfsd_file_mark_slab;	/* struct nfsd_file_mark allocations */
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;	/* NFSD_FILE_HASH_SIZE buckets; NULL until init */
static struct list_lru			nfsd_file_lru;		/* idle-file aging list walked by the laundrette */
static long				nfsd_file_lru_flags;	/* NFSD_FILE_LRU_RESCAN / NFSD_FILE_SHUTDOWN bits */
static struct fsnotify_group		*nfsd_file_fsnotify_group; /* group owning our inode marks */
static atomic_long_t			nfsd_filecache_count;	/* total files currently hashed */
static struct delayed_work		nfsd_filecache_laundrette; /* runs nfsd_file_delayed_close() */

/* whether nfsd_file_schedule_laundrette() may wait for the scan to finish */
enum nfsd_file_laundrette_ctl {
	NFSD_FILE_LAUNDRETTE_NOFLUSH = 0,
	NFSD_FILE_LAUNDRETTE_MAY_FLUSH
};
60*65294c1fSJeff Layton 
61*65294c1fSJeff Layton static void
62*65294c1fSJeff Layton nfsd_file_schedule_laundrette(enum nfsd_file_laundrette_ctl ctl)
63*65294c1fSJeff Layton {
64*65294c1fSJeff Layton 	long count = atomic_long_read(&nfsd_filecache_count);
65*65294c1fSJeff Layton 
66*65294c1fSJeff Layton 	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
67*65294c1fSJeff Layton 		return;
68*65294c1fSJeff Layton 
69*65294c1fSJeff Layton 	/* Be more aggressive about scanning if over the threshold */
70*65294c1fSJeff Layton 	if (count > NFSD_FILE_LRU_THRESHOLD)
71*65294c1fSJeff Layton 		mod_delayed_work(system_wq, &nfsd_filecache_laundrette, 0);
72*65294c1fSJeff Layton 	else
73*65294c1fSJeff Layton 		schedule_delayed_work(&nfsd_filecache_laundrette, NFSD_LAUNDRETTE_DELAY);
74*65294c1fSJeff Layton 
75*65294c1fSJeff Layton 	if (ctl == NFSD_FILE_LAUNDRETTE_NOFLUSH)
76*65294c1fSJeff Layton 		return;
77*65294c1fSJeff Layton 
78*65294c1fSJeff Layton 	/* ...and don't delay flushing if we're out of control */
79*65294c1fSJeff Layton 	if (count >= NFSD_FILE_LRU_LIMIT)
80*65294c1fSJeff Layton 		flush_delayed_work(&nfsd_filecache_laundrette);
81*65294c1fSJeff Layton }
82*65294c1fSJeff Layton 
83*65294c1fSJeff Layton static void
84*65294c1fSJeff Layton nfsd_file_slab_free(struct rcu_head *rcu)
85*65294c1fSJeff Layton {
86*65294c1fSJeff Layton 	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);
87*65294c1fSJeff Layton 
88*65294c1fSJeff Layton 	put_cred(nf->nf_cred);
89*65294c1fSJeff Layton 	kmem_cache_free(nfsd_file_slab, nf);
90*65294c1fSJeff Layton }
91*65294c1fSJeff Layton 
/*
 * fsnotify ->free_mark callback: return the containing nfsd_file_mark
 * to its slab once fsnotify is done with the embedded mark.
 */
static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}
100*65294c1fSJeff Layton 
101*65294c1fSJeff Layton static struct nfsd_file_mark *
102*65294c1fSJeff Layton nfsd_file_mark_get(struct nfsd_file_mark *nfm)
103*65294c1fSJeff Layton {
104*65294c1fSJeff Layton 	if (!atomic_inc_not_zero(&nfm->nfm_ref))
105*65294c1fSJeff Layton 		return NULL;
106*65294c1fSJeff Layton 	return nfm;
107*65294c1fSJeff Layton }
108*65294c1fSJeff Layton 
109*65294c1fSJeff Layton static void
110*65294c1fSJeff Layton nfsd_file_mark_put(struct nfsd_file_mark *nfm)
111*65294c1fSJeff Layton {
112*65294c1fSJeff Layton 	if (atomic_dec_and_test(&nfm->nfm_ref)) {
113*65294c1fSJeff Layton 
114*65294c1fSJeff Layton 		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
115*65294c1fSJeff Layton 		fsnotify_put_mark(&nfm->nfm_mark);
116*65294c1fSJeff Layton 	}
117*65294c1fSJeff Layton }
118*65294c1fSJeff Layton 
/*
 * Find the nfsd_file_mark already attached to @nf's inode, or attach a
 * new one. The mark is how we hear (via fsnotify) about attribute
 * changes and deletion of the inode so cached opens can be closed.
 *
 * Returns a referenced mark, or NULL on allocation failure.
 */
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
	int			err;
	struct fsnotify_mark	*mark;
	struct nfsd_file_mark	*nfm = NULL, *new;
	struct inode *inode = nf->nf_inode;

	do {
		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
				nfsd_file_fsnotify_group);
		if (mark) {
			/*
			 * Found an existing mark; taking a reference can
			 * still fail if its refcount already hit zero (it's
			 * being torn down), in which case we fall through
			 * and race to attach a fresh one.
			 */
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
			fsnotify_put_mark(mark);
			if (likely(nfm))
				break;
		} else
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		atomic_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));	/* lost the attach race: retry lookup */

	return nfm;
}
169*65294c1fSJeff Layton 
170*65294c1fSJeff Layton static struct nfsd_file *
171*65294c1fSJeff Layton nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval)
172*65294c1fSJeff Layton {
173*65294c1fSJeff Layton 	struct nfsd_file *nf;
174*65294c1fSJeff Layton 
175*65294c1fSJeff Layton 	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
176*65294c1fSJeff Layton 	if (nf) {
177*65294c1fSJeff Layton 		INIT_HLIST_NODE(&nf->nf_node);
178*65294c1fSJeff Layton 		INIT_LIST_HEAD(&nf->nf_lru);
179*65294c1fSJeff Layton 		nf->nf_file = NULL;
180*65294c1fSJeff Layton 		nf->nf_cred = get_current_cred();
181*65294c1fSJeff Layton 		nf->nf_flags = 0;
182*65294c1fSJeff Layton 		nf->nf_inode = inode;
183*65294c1fSJeff Layton 		nf->nf_hashval = hashval;
184*65294c1fSJeff Layton 		atomic_set(&nf->nf_ref, 1);
185*65294c1fSJeff Layton 		nf->nf_may = may & NFSD_FILE_MAY_MASK;
186*65294c1fSJeff Layton 		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
187*65294c1fSJeff Layton 			if (may & NFSD_MAY_WRITE)
188*65294c1fSJeff Layton 				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
189*65294c1fSJeff Layton 			if (may & NFSD_MAY_READ)
190*65294c1fSJeff Layton 				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
191*65294c1fSJeff Layton 		}
192*65294c1fSJeff Layton 		nf->nf_mark = NULL;
193*65294c1fSJeff Layton 		trace_nfsd_file_alloc(nf);
194*65294c1fSJeff Layton 	}
195*65294c1fSJeff Layton 	return nf;
196*65294c1fSJeff Layton }
197*65294c1fSJeff Layton 
/*
 * Tear down an nfsd_file whose last reference was put: drop the
 * fsnotify mark, close the underlying struct file, and free the
 * nfsd_file itself after an RCU grace period. Returns true if a file
 * was closed, i.e. the caller may want flush_delayed_fput() to push
 * any deferred __fput to completion.
 */
static bool
nfsd_file_free(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		/*
		 * Hold an extra reference across filp_close() so the final
		 * fput() here is what actually releases the struct file.
		 */
		get_file(nf->nf_file);
		filp_close(nf->nf_file, NULL);
		fput(nf->nf_file);
		flush = true;
	}
	/* readers may still reach nf via the RCU-protected hash chain */
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}
215*65294c1fSJeff Layton 
/*
 * Remove @nf from its hash bucket and (if queued) the LRU, and drop it
 * from the global count. Caller must hold the bucket lock. This does
 * not touch nf_ref; the caller owns dropping the hash table's
 * reference.
 */
static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);

	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
	hlist_del_rcu(&nf->nf_node);
	if (!list_empty(&nf->nf_lru))
		list_lru_del(&nfsd_file_lru, &nf->nf_lru);
	atomic_long_dec(&nfsd_filecache_count);
}
229*65294c1fSJeff Layton 
230*65294c1fSJeff Layton static bool
231*65294c1fSJeff Layton nfsd_file_unhash(struct nfsd_file *nf)
232*65294c1fSJeff Layton {
233*65294c1fSJeff Layton 	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
234*65294c1fSJeff Layton 		nfsd_file_do_unhash(nf);
235*65294c1fSJeff Layton 		return true;
236*65294c1fSJeff Layton 	}
237*65294c1fSJeff Layton 	return false;
238*65294c1fSJeff Layton }
239*65294c1fSJeff Layton 
/*
 * Unhash @nf and drop the hash table's reference with the bucket lock
 * held. If that reference was the last one, the file is queued on
 * @dispose instead so the caller can free it after dropping the lock.
 *
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return false;
	/* keep final reference for nfsd_file_lru_dispose */
	if (atomic_add_unless(&nf->nf_ref, -1, 1))
		return true;

	/* refcount was 1: defer the final put to the dispose list */
	list_add(&nf->nf_lru, dispose);
	return true;
}
258*65294c1fSJeff Layton 
259*65294c1fSJeff Layton static int
260*65294c1fSJeff Layton nfsd_file_put_noref(struct nfsd_file *nf)
261*65294c1fSJeff Layton {
262*65294c1fSJeff Layton 	int count;
263*65294c1fSJeff Layton 	trace_nfsd_file_put(nf);
264*65294c1fSJeff Layton 
265*65294c1fSJeff Layton 	count = atomic_dec_return(&nf->nf_ref);
266*65294c1fSJeff Layton 	if (!count) {
267*65294c1fSJeff Layton 		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
268*65294c1fSJeff Layton 		nfsd_file_free(nf);
269*65294c1fSJeff Layton 	}
270*65294c1fSJeff Layton 	return count;
271*65294c1fSJeff Layton }
272*65294c1fSJeff Layton 
/*
 * Put a reference to @nf. When only the hash table's reference remains
 * afterward, schedule the laundrette so an idle file eventually gets
 * closed and evicted.
 */
void
nfsd_file_put(struct nfsd_file *nf)
{
	bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;

	/*
	 * Set the flag *before* the decrement: nfsd_file_lru_cb() checks
	 * the counter first and then test-and-clears this flag, and that
	 * opposite ordering is what makes its lockless check safe.
	 */
	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (nfsd_file_put_noref(nf) == 1 && is_hashed)
		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_MAY_FLUSH);
}
282*65294c1fSJeff Layton 
283*65294c1fSJeff Layton struct nfsd_file *
284*65294c1fSJeff Layton nfsd_file_get(struct nfsd_file *nf)
285*65294c1fSJeff Layton {
286*65294c1fSJeff Layton 	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
287*65294c1fSJeff Layton 		return nf;
288*65294c1fSJeff Layton 	return NULL;
289*65294c1fSJeff Layton }
290*65294c1fSJeff Layton 
291*65294c1fSJeff Layton static void
292*65294c1fSJeff Layton nfsd_file_dispose_list(struct list_head *dispose)
293*65294c1fSJeff Layton {
294*65294c1fSJeff Layton 	struct nfsd_file *nf;
295*65294c1fSJeff Layton 
296*65294c1fSJeff Layton 	while(!list_empty(dispose)) {
297*65294c1fSJeff Layton 		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
298*65294c1fSJeff Layton 		list_del(&nf->nf_lru);
299*65294c1fSJeff Layton 		nfsd_file_put_noref(nf);
300*65294c1fSJeff Layton 	}
301*65294c1fSJeff Layton }
302*65294c1fSJeff Layton 
/*
 * Like nfsd_file_dispose_list(), but additionally waits for any
 * deferred __fput()s triggered by the frees, so the underlying files
 * are really closed before we return.
 */
static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while(!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		/* drop the hash table's reference; others may still hold one */
		if (!atomic_dec_and_test(&nf->nf_ref))
			continue;
		if (nfsd_file_free(nf))
			flush = true;
	}
	if (flush)
		flush_delayed_fput();
}
320*65294c1fSJeff Layton 
/*
 * LRU walk callback: age out one cache entry. Entries with outstanding
 * references (beyond the hash table's) or that were used since the last
 * scan survive; everything else has NFSD_FILE_HASHED cleared and is
 * moved to the dispose list in @arg for nfsd_file_lru_dispose().
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
	__releases(lock)
	__acquires(lock)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (atomic_read(&nf->nf_ref) > 1)
		goto out_skip;
	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		goto out_rescan;

	/* someone else is already tearing this entry down; leave it alone */
	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
		goto out_skip;

	list_lru_isolate_move(lru, &nf->nf_lru, head);
	return LRU_REMOVED;
out_rescan:
	/* recently used: ask the laundrette to come back for it later */
	set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
out_skip:
	return LRU_SKIP;
}
358*65294c1fSJeff Layton 
359*65294c1fSJeff Layton static void
360*65294c1fSJeff Layton nfsd_file_lru_dispose(struct list_head *head)
361*65294c1fSJeff Layton {
362*65294c1fSJeff Layton 	while(!list_empty(head)) {
363*65294c1fSJeff Layton 		struct nfsd_file *nf = list_first_entry(head,
364*65294c1fSJeff Layton 				struct nfsd_file, nf_lru);
365*65294c1fSJeff Layton 		list_del_init(&nf->nf_lru);
366*65294c1fSJeff Layton 		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
367*65294c1fSJeff Layton 		nfsd_file_do_unhash(nf);
368*65294c1fSJeff Layton 		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
369*65294c1fSJeff Layton 		nfsd_file_put_noref(nf);
370*65294c1fSJeff Layton 	}
371*65294c1fSJeff Layton }
372*65294c1fSJeff Layton 
/* Shrinker ->count_objects: report how many files sit on the LRU. */
static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}
378*65294c1fSJeff Layton 
379*65294c1fSJeff Layton static unsigned long
380*65294c1fSJeff Layton nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
381*65294c1fSJeff Layton {
382*65294c1fSJeff Layton 	LIST_HEAD(head);
383*65294c1fSJeff Layton 	unsigned long ret;
384*65294c1fSJeff Layton 
385*65294c1fSJeff Layton 	ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head);
386*65294c1fSJeff Layton 	nfsd_file_lru_dispose(&head);
387*65294c1fSJeff Layton 	return ret;
388*65294c1fSJeff Layton }
389*65294c1fSJeff Layton 
/* Memory shrinker that trims the file cache under memory pressure */
static struct shrinker	nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};
395*65294c1fSJeff Layton 
/*
 * Under the bucket lock, unhash and release every cached file in bucket
 * @hashval that belongs to @inode. Files needing their final put are
 * collected on @dispose for the caller to handle after the lock drops.
 */
static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file	*nf;
	struct hlist_node	*tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}
410*65294c1fSJeff Layton 
/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	/* same bucket computation as the lookup path: hash of i_ino */
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}
431*65294c1fSJeff Layton 
/**
 * nfsd_file_close_inode - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Unlike the _sync variant,
 * this does not wait for deferred __fput()s to complete.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int		hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list(&dispose);
}
451*65294c1fSJeff Layton 
/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: dummy
 *
 * Walk the LRU list and close any entries that have not been used since
 * the last scan.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
	LIST_HEAD(head);

	list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX);

	/* entries that survived because they were recently used: rescan later */
	if (test_and_clear_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags))
		nfsd_file_schedule_laundrette(NFSD_FILE_LAUNDRETTE_NOFLUSH);

	if (!list_empty(&head)) {
		nfsd_file_lru_dispose(&head);
		/* make sure any deferred __fput()s actually run */
		flush_delayed_fput();
	}
}
476*65294c1fSJeff Layton 
/*
 * Lease notifier callback: when an F_SETLEASE lease is set up on an
 * inode, forcibly close any cached opens of it — presumably so nfsd's
 * cached opens don't conflict with the new lease (confirm at call
 * sites of this notifier chain).
 */
static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			    void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}
488*65294c1fSJeff Layton 
/* registered/unregistered via lease_{register,unregister}_notifier below */
static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};
492*65294c1fSJeff Layton 
/*
 * fsnotify event handler for our inode marks (mask FS_ATTRIB |
 * FS_DELETE_SELF): close cached files for an inode when it is deleted,
 * or when an attribute change reveals its link count dropped to zero.
 */
static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				u32 mask, const void *data, int data_type,
				const struct qstr *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}
517*65294c1fSJeff Layton 
518*65294c1fSJeff Layton 
/* ops for nfsd_file_fsnotify_group (allocated in nfsd_file_cache_init) */
static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};
523*65294c1fSJeff Layton 
/*
 * Set up the global open file cache: hash table, slab caches, LRU,
 * memory shrinker, lease notifier and fsnotify group. Returns 0 on
 * success or if already initialized; on failure every resource set up
 * so far is torn down via the goto-unwind chain below.
 */
int
nfsd_file_cache_init(void)
{
	int		ret = -ENOMEM;
	unsigned int	i;

	/* allow laundrette scheduling again after a prior shutdown */
	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	/* already initialized? */
	if (nfsd_file_hashtbl)
		return 0;

	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
					sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}


	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_delayed_close);
out:
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	/* kmem_cache_destroy() and kfree() tolerate NULL */
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	goto out;
}
606*65294c1fSJeff Layton 
/*
 * Unhash and release every entry in the cache. Used at shutdown (and
 * from any caller needing a full flush).
 *
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(void)
{
	unsigned int		i;
	struct nfsd_file	*nf;
	LIST_HEAD(dispose);
	bool del;

	/* cache never initialized (or already torn down)? */
	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		spin_lock(&nfsd_file_hashtbl[i].nfb_lock);
		while(!hlist_empty(&nfsd_file_hashtbl[i].nfb_head)) {
			nf = hlist_entry(nfsd_file_hashtbl[i].nfb_head.first,
					 struct nfsd_file, nf_node);
			del = nfsd_file_unhash_and_release_locked(nf, &dispose);

			/*
			 * Deadlock detected! Something marked this entry as
			 * unhashed, but hasn't removed it from the hash list.
			 */
			WARN_ON_ONCE(!del);
		}
		spin_unlock(&nfsd_file_hashtbl[i].nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}
638*65294c1fSJeff Layton 
639*65294c1fSJeff Layton void
640*65294c1fSJeff Layton nfsd_file_cache_shutdown(void)
641*65294c1fSJeff Layton {
642*65294c1fSJeff Layton 	LIST_HEAD(dispose);
643*65294c1fSJeff Layton 
644*65294c1fSJeff Layton 	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);
645*65294c1fSJeff Layton 
646*65294c1fSJeff Layton 	lease_unregister_notifier(&nfsd_file_lease_notifier);
647*65294c1fSJeff Layton 	unregister_shrinker(&nfsd_file_shrinker);
648*65294c1fSJeff Layton 	/*
649*65294c1fSJeff Layton 	 * make sure all callers of nfsd_file_lru_cb are done before
650*65294c1fSJeff Layton 	 * calling nfsd_file_cache_purge
651*65294c1fSJeff Layton 	 */
652*65294c1fSJeff Layton 	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
653*65294c1fSJeff Layton 	nfsd_file_cache_purge();
654*65294c1fSJeff Layton 	list_lru_destroy(&nfsd_file_lru);
655*65294c1fSJeff Layton 	rcu_barrier();
656*65294c1fSJeff Layton 	fsnotify_put_group(nfsd_file_fsnotify_group);
657*65294c1fSJeff Layton 	nfsd_file_fsnotify_group = NULL;
658*65294c1fSJeff Layton 	kmem_cache_destroy(nfsd_file_slab);
659*65294c1fSJeff Layton 	nfsd_file_slab = NULL;
660*65294c1fSJeff Layton 	fsnotify_wait_marks_destroyed();
661*65294c1fSJeff Layton 	kmem_cache_destroy(nfsd_file_mark_slab);
662*65294c1fSJeff Layton 	nfsd_file_mark_slab = NULL;
663*65294c1fSJeff Layton 	kfree(nfsd_file_hashtbl);
664*65294c1fSJeff Layton 	nfsd_file_hashtbl = NULL;
665*65294c1fSJeff Layton }
666*65294c1fSJeff Layton 
667*65294c1fSJeff Layton static bool
668*65294c1fSJeff Layton nfsd_match_cred(const struct cred *c1, const struct cred *c2)
669*65294c1fSJeff Layton {
670*65294c1fSJeff Layton 	int i;
671*65294c1fSJeff Layton 
672*65294c1fSJeff Layton 	if (!uid_eq(c1->fsuid, c2->fsuid))
673*65294c1fSJeff Layton 		return false;
674*65294c1fSJeff Layton 	if (!gid_eq(c1->fsgid, c2->fsgid))
675*65294c1fSJeff Layton 		return false;
676*65294c1fSJeff Layton 	if (c1->group_info == NULL || c2->group_info == NULL)
677*65294c1fSJeff Layton 		return c1->group_info == c2->group_info;
678*65294c1fSJeff Layton 	if (c1->group_info->ngroups != c2->group_info->ngroups)
679*65294c1fSJeff Layton 		return false;
680*65294c1fSJeff Layton 	for (i = 0; i < c1->group_info->ngroups; i++) {
681*65294c1fSJeff Layton 		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
682*65294c1fSJeff Layton 			return false;
683*65294c1fSJeff Layton 	}
684*65294c1fSJeff Layton 	return true;
685*65294c1fSJeff Layton }
686*65294c1fSJeff Layton 
687*65294c1fSJeff Layton static struct nfsd_file *
688*65294c1fSJeff Layton nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
689*65294c1fSJeff Layton 			unsigned int hashval)
690*65294c1fSJeff Layton {
691*65294c1fSJeff Layton 	struct nfsd_file *nf;
692*65294c1fSJeff Layton 	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
693*65294c1fSJeff Layton 
694*65294c1fSJeff Layton 	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
695*65294c1fSJeff Layton 				 nf_node) {
696*65294c1fSJeff Layton 		if ((need & nf->nf_may) != need)
697*65294c1fSJeff Layton 			continue;
698*65294c1fSJeff Layton 		if (nf->nf_inode != inode)
699*65294c1fSJeff Layton 			continue;
700*65294c1fSJeff Layton 		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
701*65294c1fSJeff Layton 			continue;
702*65294c1fSJeff Layton 		if (nfsd_file_get(nf) != NULL)
703*65294c1fSJeff Layton 			return nf;
704*65294c1fSJeff Layton 	}
705*65294c1fSJeff Layton 	return NULL;
706*65294c1fSJeff Layton }
707*65294c1fSJeff Layton 
708*65294c1fSJeff Layton /**
709*65294c1fSJeff Layton  * nfsd_file_is_cached - are there any cached open files for this fh?
710*65294c1fSJeff Layton  * @inode: inode of the file to check
711*65294c1fSJeff Layton  *
712*65294c1fSJeff Layton  * Scan the hashtable for open files that match this fh. Returns true if there
713*65294c1fSJeff Layton  * are any, and false if not.
714*65294c1fSJeff Layton  */
715*65294c1fSJeff Layton bool
716*65294c1fSJeff Layton nfsd_file_is_cached(struct inode *inode)
717*65294c1fSJeff Layton {
718*65294c1fSJeff Layton 	bool			ret = false;
719*65294c1fSJeff Layton 	struct nfsd_file	*nf;
720*65294c1fSJeff Layton 	unsigned int		hashval;
721*65294c1fSJeff Layton 
722*65294c1fSJeff Layton         hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
723*65294c1fSJeff Layton 
724*65294c1fSJeff Layton 	rcu_read_lock();
725*65294c1fSJeff Layton 	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
726*65294c1fSJeff Layton 				 nf_node) {
727*65294c1fSJeff Layton 		if (inode == nf->nf_inode) {
728*65294c1fSJeff Layton 			ret = true;
729*65294c1fSJeff Layton 			break;
730*65294c1fSJeff Layton 		}
731*65294c1fSJeff Layton 	}
732*65294c1fSJeff Layton 	rcu_read_unlock();
733*65294c1fSJeff Layton 	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
734*65294c1fSJeff Layton 	return ret;
735*65294c1fSJeff Layton }
736*65294c1fSJeff Layton 
737*65294c1fSJeff Layton __be32
738*65294c1fSJeff Layton nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
739*65294c1fSJeff Layton 		  unsigned int may_flags, struct nfsd_file **pnf)
740*65294c1fSJeff Layton {
741*65294c1fSJeff Layton 	__be32	status;
742*65294c1fSJeff Layton 	struct nfsd_file *nf, *new;
743*65294c1fSJeff Layton 	struct inode *inode;
744*65294c1fSJeff Layton 	unsigned int hashval;
745*65294c1fSJeff Layton 
746*65294c1fSJeff Layton 	/* FIXME: skip this if fh_dentry is already set? */
747*65294c1fSJeff Layton 	status = fh_verify(rqstp, fhp, S_IFREG,
748*65294c1fSJeff Layton 				may_flags|NFSD_MAY_OWNER_OVERRIDE);
749*65294c1fSJeff Layton 	if (status != nfs_ok)
750*65294c1fSJeff Layton 		return status;
751*65294c1fSJeff Layton 
752*65294c1fSJeff Layton 	inode = d_inode(fhp->fh_dentry);
753*65294c1fSJeff Layton 	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
754*65294c1fSJeff Layton retry:
755*65294c1fSJeff Layton 	rcu_read_lock();
756*65294c1fSJeff Layton 	nf = nfsd_file_find_locked(inode, may_flags, hashval);
757*65294c1fSJeff Layton 	rcu_read_unlock();
758*65294c1fSJeff Layton 	if (nf)
759*65294c1fSJeff Layton 		goto wait_for_construction;
760*65294c1fSJeff Layton 
761*65294c1fSJeff Layton 	new = nfsd_file_alloc(inode, may_flags, hashval);
762*65294c1fSJeff Layton 	if (!new) {
763*65294c1fSJeff Layton 		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
764*65294c1fSJeff Layton 					NULL, nfserr_jukebox);
765*65294c1fSJeff Layton 		return nfserr_jukebox;
766*65294c1fSJeff Layton 	}
767*65294c1fSJeff Layton 
768*65294c1fSJeff Layton 	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
769*65294c1fSJeff Layton 	nf = nfsd_file_find_locked(inode, may_flags, hashval);
770*65294c1fSJeff Layton 	if (nf == NULL)
771*65294c1fSJeff Layton 		goto open_file;
772*65294c1fSJeff Layton 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
773*65294c1fSJeff Layton 	nfsd_file_slab_free(&new->nf_rcu);
774*65294c1fSJeff Layton 
775*65294c1fSJeff Layton wait_for_construction:
776*65294c1fSJeff Layton 	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);
777*65294c1fSJeff Layton 
778*65294c1fSJeff Layton 	/* Did construction of this file fail? */
779*65294c1fSJeff Layton 	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
780*65294c1fSJeff Layton 		nfsd_file_put_noref(nf);
781*65294c1fSJeff Layton 		goto retry;
782*65294c1fSJeff Layton 	}
783*65294c1fSJeff Layton 
784*65294c1fSJeff Layton 	this_cpu_inc(nfsd_file_cache_hits);
785*65294c1fSJeff Layton 
786*65294c1fSJeff Layton 	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
787*65294c1fSJeff Layton 		bool write = (may_flags & NFSD_MAY_WRITE);
788*65294c1fSJeff Layton 
789*65294c1fSJeff Layton 		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
790*65294c1fSJeff Layton 		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
791*65294c1fSJeff Layton 			status = nfserrno(nfsd_open_break_lease(
792*65294c1fSJeff Layton 					file_inode(nf->nf_file), may_flags));
793*65294c1fSJeff Layton 			if (status == nfs_ok) {
794*65294c1fSJeff Layton 				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
795*65294c1fSJeff Layton 				if (write)
796*65294c1fSJeff Layton 					clear_bit(NFSD_FILE_BREAK_WRITE,
797*65294c1fSJeff Layton 						  &nf->nf_flags);
798*65294c1fSJeff Layton 			}
799*65294c1fSJeff Layton 		}
800*65294c1fSJeff Layton 	}
801*65294c1fSJeff Layton out:
802*65294c1fSJeff Layton 	if (status == nfs_ok) {
803*65294c1fSJeff Layton 		*pnf = nf;
804*65294c1fSJeff Layton 	} else {
805*65294c1fSJeff Layton 		nfsd_file_put(nf);
806*65294c1fSJeff Layton 		nf = NULL;
807*65294c1fSJeff Layton 	}
808*65294c1fSJeff Layton 
809*65294c1fSJeff Layton 	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
810*65294c1fSJeff Layton 	return status;
811*65294c1fSJeff Layton open_file:
812*65294c1fSJeff Layton 	nf = new;
813*65294c1fSJeff Layton 	/* Take reference for the hashtable */
814*65294c1fSJeff Layton 	atomic_inc(&nf->nf_ref);
815*65294c1fSJeff Layton 	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
816*65294c1fSJeff Layton 	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
817*65294c1fSJeff Layton 	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
818*65294c1fSJeff Layton 	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
819*65294c1fSJeff Layton 	++nfsd_file_hashtbl[hashval].nfb_count;
820*65294c1fSJeff Layton 	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
821*65294c1fSJeff Layton 			nfsd_file_hashtbl[hashval].nfb_count);
822*65294c1fSJeff Layton 	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
823*65294c1fSJeff Layton 	atomic_long_inc(&nfsd_filecache_count);
824*65294c1fSJeff Layton 
825*65294c1fSJeff Layton 	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
826*65294c1fSJeff Layton 	if (nf->nf_mark)
827*65294c1fSJeff Layton 		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
828*65294c1fSJeff Layton 				may_flags, &nf->nf_file);
829*65294c1fSJeff Layton 	else
830*65294c1fSJeff Layton 		status = nfserr_jukebox;
831*65294c1fSJeff Layton 	/*
832*65294c1fSJeff Layton 	 * If construction failed, or we raced with a call to unlink()
833*65294c1fSJeff Layton 	 * then unhash.
834*65294c1fSJeff Layton 	 */
835*65294c1fSJeff Layton 	if (status != nfs_ok || inode->i_nlink == 0) {
836*65294c1fSJeff Layton 		bool do_free;
837*65294c1fSJeff Layton 		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
838*65294c1fSJeff Layton 		do_free = nfsd_file_unhash(nf);
839*65294c1fSJeff Layton 		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
840*65294c1fSJeff Layton 		if (do_free)
841*65294c1fSJeff Layton 			nfsd_file_put_noref(nf);
842*65294c1fSJeff Layton 	}
843*65294c1fSJeff Layton 	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
844*65294c1fSJeff Layton 	smp_mb__after_atomic();
845*65294c1fSJeff Layton 	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
846*65294c1fSJeff Layton 	goto out;
847*65294c1fSJeff Layton }
848*65294c1fSJeff Layton 
849*65294c1fSJeff Layton /*
850*65294c1fSJeff Layton  * Note that fields may be added, removed or reordered in the future. Programs
851*65294c1fSJeff Layton  * scraping this file for info should test the labels to ensure they're
852*65294c1fSJeff Layton  * getting the correct field.
853*65294c1fSJeff Layton  */
854*65294c1fSJeff Layton static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
855*65294c1fSJeff Layton {
856*65294c1fSJeff Layton 	unsigned int i, count = 0, longest = 0;
857*65294c1fSJeff Layton 	unsigned long hits = 0;
858*65294c1fSJeff Layton 
859*65294c1fSJeff Layton 	/*
860*65294c1fSJeff Layton 	 * No need for spinlocks here since we're not terribly interested in
861*65294c1fSJeff Layton 	 * accuracy. We do take the nfsd_mutex simply to ensure that we
862*65294c1fSJeff Layton 	 * don't end up racing with server shutdown
863*65294c1fSJeff Layton 	 */
864*65294c1fSJeff Layton 	mutex_lock(&nfsd_mutex);
865*65294c1fSJeff Layton 	if (nfsd_file_hashtbl) {
866*65294c1fSJeff Layton 		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
867*65294c1fSJeff Layton 			count += nfsd_file_hashtbl[i].nfb_count;
868*65294c1fSJeff Layton 			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
869*65294c1fSJeff Layton 		}
870*65294c1fSJeff Layton 	}
871*65294c1fSJeff Layton 	mutex_unlock(&nfsd_mutex);
872*65294c1fSJeff Layton 
873*65294c1fSJeff Layton 	for_each_possible_cpu(i)
874*65294c1fSJeff Layton 		hits += per_cpu(nfsd_file_cache_hits, i);
875*65294c1fSJeff Layton 
876*65294c1fSJeff Layton 	seq_printf(m, "total entries: %u\n", count);
877*65294c1fSJeff Layton 	seq_printf(m, "longest chain: %u\n", longest);
878*65294c1fSJeff Layton 	seq_printf(m, "cache hits:    %lu\n", hits);
879*65294c1fSJeff Layton 	return 0;
880*65294c1fSJeff Layton }
881*65294c1fSJeff Layton 
882*65294c1fSJeff Layton int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
883*65294c1fSJeff Layton {
884*65294c1fSJeff Layton 	return single_open(file, nfsd_file_cache_stats_show, NULL);
885*65294c1fSJeff Layton }
886