xref: /openbmc/linux/fs/dcache.c (revision 645f08975f49441b3e753d8dc5b740cbcb226594)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * fs/dcache.c
4   *
5   * Complete reimplementation
6   * (C) 1997 Thomas Schoebel-Theuer,
7   * with heavy changes by Linus Torvalds
8   */
9  
10  /*
11   * Notes on the allocation strategy:
12   *
13   * The dcache is a master of the icache - whenever a dcache entry
14   * exists, the inode will always exist. "iput()" is done either when
15   * the dcache entry is deleted or garbage collected.
16   */
17  
18  #include <linux/ratelimit.h>
19  #include <linux/string.h>
20  #include <linux/mm.h>
21  #include <linux/fs.h>
22  #include <linux/fscrypt.h>
23  #include <linux/fsnotify.h>
24  #include <linux/slab.h>
25  #include <linux/init.h>
26  #include <linux/hash.h>
27  #include <linux/cache.h>
28  #include <linux/export.h>
29  #include <linux/security.h>
30  #include <linux/seqlock.h>
31  #include <linux/memblock.h>
32  #include <linux/bit_spinlock.h>
33  #include <linux/rculist_bl.h>
34  #include <linux/list_lru.h>
35  #include "internal.h"
36  #include "mount.h"
37  
38  /*
39   * Usage:
40   * dcache->d_inode->i_lock protects:
41   *   - i_dentry, d_u.d_alias, d_inode of aliases
42   * dcache_hash_bucket lock protects:
43   *   - the dcache hash table
44   * s_roots bl list spinlock protects:
45   *   - the s_roots list (see __d_drop)
46   * dentry->d_sb->s_dentry_lru_lock protects:
47   *   - the dcache lru lists and counters
48   * d_lock protects:
49   *   - d_flags
50   *   - d_name
51   *   - d_lru
52   *   - d_count
53   *   - d_unhashed()
54   *   - d_parent and d_subdirs
55   *   - children's d_child and d_parent
56   *   - d_u.d_alias, d_inode
57   *
58   * Ordering:
59   * dentry->d_inode->i_lock
60   *   dentry->d_lock
61   *     dentry->d_sb->s_dentry_lru_lock
62   *     dcache_hash_bucket lock
63   *     s_roots lock
64   *
65   * If there is an ancestor relationship:
66   * dentry->d_parent->...->d_parent->d_lock
67   *   ...
68   *     dentry->d_parent->d_lock
69   *       dentry->d_lock
70   *
71   * If no ancestor relationship:
72   * arbitrary, since it's serialized on rename_lock
73   */
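
/*
 * Example (hypothetical helper, for illustration only): taking both
 * locks for a dentry/inode pair in the order documented above.  Real
 * callers must also cope with trylock failures and with the dentry
 * changing under them, as dentry_kill() below does.
 *
 *	spin_lock(&inode->i_lock);	// outermost lock
 *	spin_lock(&dentry->d_lock);	// nests inside i_lock
 *	... work on the pair ...
 *	spin_unlock(&dentry->d_lock);
 *	spin_unlock(&inode->i_lock);
 */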
74  int sysctl_vfs_cache_pressure __read_mostly = 100;
75  EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
76  
77  __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
78  
79  EXPORT_SYMBOL(rename_lock);
80  
81  static struct kmem_cache *dentry_cache __read_mostly;
82  
83  const struct qstr empty_name = QSTR_INIT("", 0);
84  EXPORT_SYMBOL(empty_name);
85  const struct qstr slash_name = QSTR_INIT("/", 1);
86  EXPORT_SYMBOL(slash_name);
87  
88  /*
89   * This is the single most critical data structure when it comes
90   * to the dcache: the hashtable for lookups. Somebody should try
91   * to make this good - I've just made it work.
92   *
93   * This hash-function tries to avoid losing too many bits of hash
94   * information, yet avoid using a prime hash-size or similar.
95   */
96  
97  static unsigned int d_hash_shift __read_mostly;
98  
99  static struct hlist_bl_head *dentry_hashtable __read_mostly;
100  
101  static inline struct hlist_bl_head *d_hash(unsigned int hash)
102  {
103  	return dentry_hashtable + (hash >> d_hash_shift);
104  }
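
/*
 * Example (illustrative): looking up the chain for a given name hash.
 * d_hash_shift is sized at boot so that the high bits of the 32-bit
 * hash select one of the buckets:
 *
 *	struct hlist_bl_head *b = d_hash(name->hash);
 *
 *	hlist_bl_lock(b);
 *	... walk or modify the hash chain ...
 *	hlist_bl_unlock(b);
 */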
105  
106  #define IN_LOOKUP_SHIFT 10
107  static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
108  
109  static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
110  					unsigned int hash)
111  {
112  	hash += (unsigned long) parent / L1_CACHE_BYTES;
113  	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
114  }
115  
116  
117  /* Statistics gathering. */
118  struct dentry_stat_t dentry_stat = {
119  	.age_limit = 45,
120  };
121  
122  static DEFINE_PER_CPU(long, nr_dentry);
123  static DEFINE_PER_CPU(long, nr_dentry_unused);
124  static DEFINE_PER_CPU(long, nr_dentry_negative);
125  
126  #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
127  
128  /*
129   * Here we resort to our own counters instead of using generic per-cpu counters
130   * for consistency with what the vfs inode code does. We expect to get
131   * better code and performance by having our own specialized counters.
132   *
133   * Please note that the loop is done over all possible CPUs, not over all online
134   * CPUs. The reason for this is that we don't want to play games with CPUs going
135   * on and off. If one of them goes offline, we simply keep its counters.
136   *
137   * glommer: See cffbc8a for details, and if you ever intend to change this,
138   * please update all vfs counters to match.
139   */
140  static long get_nr_dentry(void)
141  {
142  	int i;
143  	long sum = 0;
144  	for_each_possible_cpu(i)
145  		sum += per_cpu(nr_dentry, i);
146  	return sum < 0 ? 0 : sum;
147  }
148  
149  static long get_nr_dentry_unused(void)
150  {
151  	int i;
152  	long sum = 0;
153  	for_each_possible_cpu(i)
154  		sum += per_cpu(nr_dentry_unused, i);
155  	return sum < 0 ? 0 : sum;
156  }
157  
158  static long get_nr_dentry_negative(void)
159  {
160  	int i;
161  	long sum = 0;
162  
163  	for_each_possible_cpu(i)
164  		sum += per_cpu(nr_dentry_negative, i);
165  	return sum < 0 ? 0 : sum;
166  }
167  
168  int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
169  		   size_t *lenp, loff_t *ppos)
170  {
171  	dentry_stat.nr_dentry = get_nr_dentry();
172  	dentry_stat.nr_unused = get_nr_dentry_unused();
173  	dentry_stat.nr_negative = get_nr_dentry_negative();
174  	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
175  }
176  #endif
177  
178  /*
179   * Compare 2 name strings, return 0 if they match, otherwise non-zero.
180   * The strings are both count bytes long, and count is non-zero.
181   */
182  #ifdef CONFIG_DCACHE_WORD_ACCESS
183  
184  #include <asm/word-at-a-time.h>
185  /*
186   * NOTE! 'cs' comes from a dentry, so the name has an
187   * aligned allocation for this particular component. We don't
188   * strictly need the load_unaligned_zeropad() safety, but it
189   * doesn't hurt either.
190   *
191   * In contrast, 'ct' and 'tcount' can be from a pathname, and do
192   * need the careful unaligned handling.
193   */
194  static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
195  {
196  	unsigned long a,b,mask;
197  
198  	for (;;) {
199  		a = read_word_at_a_time(cs);
200  		b = load_unaligned_zeropad(ct);
201  		if (tcount < sizeof(unsigned long))
202  			break;
203  		if (unlikely(a != b))
204  			return 1;
205  		cs += sizeof(unsigned long);
206  		ct += sizeof(unsigned long);
207  		tcount -= sizeof(unsigned long);
208  		if (!tcount)
209  			return 0;
210  	}
211  	mask = bytemask_from_count(tcount);
212  	return unlikely(!!((a ^ b) & mask));
213  }
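
/*
 * Worked example of the final masked compare, on 64-bit little-endian:
 * for tcount == 3 (say "bar"), bytemask_from_count(3) is
 * 0x0000000000ffffff, so only the low three bytes of 'a ^ b' are
 * significant and whatever load_unaligned_zeropad() picked up beyond
 * the end of the string is ignored.
 */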
214  
215  #else
216  
217  static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
218  {
219  	do {
220  		if (*cs != *ct)
221  			return 1;
222  		cs++;
223  		ct++;
224  		tcount--;
225  	} while (tcount);
226  	return 0;
227  }
228  
229  #endif
230  
231  static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
232  {
233  	/*
234  	 * Be careful about RCU walk racing with rename:
235  	 * use 'READ_ONCE' to fetch the name pointer.
236  	 *
237  	 * NOTE! Even if a rename will mean that the length
238  	 * was not loaded atomically, we don't care. The
239  	 * RCU walk will check the sequence count eventually,
240  	 * and catch it. And we won't overrun the buffer,
241  	 * because we're reading the name pointer atomically,
242  	 * and a dentry name is guaranteed to be properly
243  	 * terminated with a NUL byte.
244  	 *
245  	 * End result: even if 'len' is wrong, we'll exit
246  	 * early because the data cannot match (there can
247  	 * be no NUL in the ct/tcount data)
248  	 */
249  	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
250  
251  	return dentry_string_cmp(cs, ct, tcount);
252  }
253  
254  struct external_name {
255  	union {
256  		atomic_t count;
257  		struct rcu_head head;
258  	} u;
259  	unsigned char name[];
260  };
261  
262  static inline struct external_name *external_name(struct dentry *dentry)
263  {
264  	return container_of(dentry->d_name.name, struct external_name, name[0]);
265  }
266  
267  static void __d_free(struct rcu_head *head)
268  {
269  	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
270  
271  	kmem_cache_free(dentry_cache, dentry);
272  }
273  
274  static void __d_free_external(struct rcu_head *head)
275  {
276  	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
277  	kfree(external_name(dentry));
278  	kmem_cache_free(dentry_cache, dentry);
279  }
280  
281  static inline int dname_external(const struct dentry *dentry)
282  {
283  	return dentry->d_name.name != dentry->d_iname;
284  }
285  
286  void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
287  {
288  	spin_lock(&dentry->d_lock);
289  	name->name = dentry->d_name;
290  	if (unlikely(dname_external(dentry))) {
291  		atomic_inc(&external_name(dentry)->u.count);
292  	} else {
293  		memcpy(name->inline_name, dentry->d_iname,
294  		       dentry->d_name.len + 1);
295  		name->name.name = name->inline_name;
296  	}
297  	spin_unlock(&dentry->d_lock);
298  }
299  EXPORT_SYMBOL(take_dentry_name_snapshot);
300  
301  void release_dentry_name_snapshot(struct name_snapshot *name)
302  {
303  	if (unlikely(name->name.name != name->inline_name)) {
304  		struct external_name *p;
305  		p = container_of(name->name.name, struct external_name, name[0]);
306  		if (unlikely(atomic_dec_and_test(&p->u.count)))
307  			kfree_rcu(p, u.head);
308  	}
309  }
310  EXPORT_SYMBOL(release_dentry_name_snapshot);
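
/*
 * Example (illustrative sketch): keeping a stable copy of a name
 * across an operation that can race with rename, much as fsnotify
 * does:
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	report_name(snap.name.name);	// hypothetical consumer, may sleep
 *	release_dentry_name_snapshot(&snap);
 */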
311  
312  static inline void __d_set_inode_and_type(struct dentry *dentry,
313  					  struct inode *inode,
314  					  unsigned type_flags)
315  {
316  	unsigned flags;
317  
318  	dentry->d_inode = inode;
319  	flags = READ_ONCE(dentry->d_flags);
320  	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
321  	flags |= type_flags;
322  	smp_store_release(&dentry->d_flags, flags);
323  }
324  
325  static inline void __d_clear_type_and_inode(struct dentry *dentry)
326  {
327  	unsigned flags = READ_ONCE(dentry->d_flags);
328  
329  	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
330  	WRITE_ONCE(dentry->d_flags, flags);
331  	dentry->d_inode = NULL;
332  	if (dentry->d_flags & DCACHE_LRU_LIST)
333  		this_cpu_inc(nr_dentry_negative);
334  }
335  
336  static void dentry_free(struct dentry *dentry)
337  {
338  	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
339  	if (unlikely(dname_external(dentry))) {
340  		struct external_name *p = external_name(dentry);
341  		if (likely(atomic_dec_and_test(&p->u.count))) {
342  			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
343  			return;
344  		}
345  	}
346  	/* if dentry was never visible to RCU, immediate free is OK */
347  	if (dentry->d_flags & DCACHE_NORCU)
348  		__d_free(&dentry->d_u.d_rcu);
349  	else
350  		call_rcu(&dentry->d_u.d_rcu, __d_free);
351  }
352  
353  /*
354   * Release the dentry's inode, using the filesystem
355   * d_iput() operation if defined.
356   */
357  static void dentry_unlink_inode(struct dentry * dentry)
358  	__releases(dentry->d_lock)
359  	__releases(dentry->d_inode->i_lock)
360  {
361  	struct inode *inode = dentry->d_inode;
362  
363  	raw_write_seqcount_begin(&dentry->d_seq);
364  	__d_clear_type_and_inode(dentry);
365  	hlist_del_init(&dentry->d_u.d_alias);
366  	raw_write_seqcount_end(&dentry->d_seq);
367  	spin_unlock(&dentry->d_lock);
368  	spin_unlock(&inode->i_lock);
369  	if (!inode->i_nlink)
370  		fsnotify_inoderemove(inode);
371  	if (dentry->d_op && dentry->d_op->d_iput)
372  		dentry->d_op->d_iput(dentry, inode);
373  	else
374  		iput(inode);
375  }
376  
377  /*
378   * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
379   * is in use - which includes both the "real" per-superblock
380   * LRU list _and_ the DCACHE_SHRINK_LIST use.
381   *
382   * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
383   * on the shrink list (ie not on the superblock LRU list).
384   *
385   * The per-cpu "nr_dentry_unused" counters are updated with
386   * the DCACHE_LRU_LIST bit.
387   *
388   * The per-cpu "nr_dentry_negative" counters are only updated
389   * when deleted from or added to the per-superblock LRU list, not
390   * from/to the shrink list. That is to avoid an unneeded dec/inc
391   * pair when moving from LRU to shrink list in select_collect().
392   *
393   * These helper functions make sure we always follow the
394   * rules. d_lock must be held by the caller.
395   */
396  #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
397  static void d_lru_add(struct dentry *dentry)
398  {
399  	D_FLAG_VERIFY(dentry, 0);
400  	dentry->d_flags |= DCACHE_LRU_LIST;
401  	this_cpu_inc(nr_dentry_unused);
402  	if (d_is_negative(dentry))
403  		this_cpu_inc(nr_dentry_negative);
404  	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
405  }
406  
407  static void d_lru_del(struct dentry *dentry)
408  {
409  	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
410  	dentry->d_flags &= ~DCACHE_LRU_LIST;
411  	this_cpu_dec(nr_dentry_unused);
412  	if (d_is_negative(dentry))
413  		this_cpu_dec(nr_dentry_negative);
414  	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
415  }
416  
417  static void d_shrink_del(struct dentry *dentry)
418  {
419  	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
420  	list_del_init(&dentry->d_lru);
421  	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
422  	this_cpu_dec(nr_dentry_unused);
423  }
424  
425  static void d_shrink_add(struct dentry *dentry, struct list_head *list)
426  {
427  	D_FLAG_VERIFY(dentry, 0);
428  	list_add(&dentry->d_lru, list);
429  	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
430  	this_cpu_inc(nr_dentry_unused);
431  }
432  
433  /*
434   * These can only be called under the global LRU lock, ie during the
435   * callback for freeing the LRU list. "isolate" removes it from the
436   * LRU lists entirely, while shrink_move moves it to the indicated
437   * private list.
438   */
439  static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
440  {
441  	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
442  	dentry->d_flags &= ~DCACHE_LRU_LIST;
443  	this_cpu_dec(nr_dentry_unused);
444  	if (d_is_negative(dentry))
445  		this_cpu_dec(nr_dentry_negative);
446  	list_lru_isolate(lru, &dentry->d_lru);
447  }
448  
449  static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
450  			      struct list_head *list)
451  {
452  	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
453  	dentry->d_flags |= DCACHE_SHRINK_LIST;
454  	if (d_is_negative(dentry))
455  		this_cpu_dec(nr_dentry_negative);
456  	list_lru_isolate_move(lru, &dentry->d_lru, list);
457  }
458  
459  /**
460   * d_drop - drop a dentry
461   * @dentry: dentry to drop
462   *
463   * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
464   * be found through a VFS lookup any more. Note that this is different from
465   * deleting the dentry - d_delete will try to mark the dentry negative if
466   * possible, giving a successful _negative_ lookup, while d_drop will
467   * just make the cache lookup fail.
468   *
469   * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
470   * reason (NFS timeouts or autofs deletes).
471   *
472   * __d_drop requires dentry->d_lock
473   * ___d_drop doesn't mark dentry as "unhashed"
474   *   (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
475   */
476  static void ___d_drop(struct dentry *dentry)
477  {
478  	struct hlist_bl_head *b;
479  	/*
480  	 * Hashed dentries are normally on the dentry hashtable,
481  	 * with the exception of those newly allocated by
482  	 * d_obtain_root, which are always IS_ROOT:
483  	 */
484  	if (unlikely(IS_ROOT(dentry)))
485  		b = &dentry->d_sb->s_roots;
486  	else
487  		b = d_hash(dentry->d_name.hash);
488  
489  	hlist_bl_lock(b);
490  	__hlist_bl_del(&dentry->d_hash);
491  	hlist_bl_unlock(b);
492  }
493  
494  void __d_drop(struct dentry *dentry)
495  {
496  	if (!d_unhashed(dentry)) {
497  		___d_drop(dentry);
498  		dentry->d_hash.pprev = NULL;
499  		write_seqcount_invalidate(&dentry->d_seq);
500  	}
501  }
502  EXPORT_SYMBOL(__d_drop);
503  
504  void d_drop(struct dentry *dentry)
505  {
506  	spin_lock(&dentry->d_lock);
507  	__d_drop(dentry);
508  	spin_unlock(&dentry->d_lock);
509  }
510  EXPORT_SYMBOL(d_drop);
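
/*
 * Example (illustrative): a filesystem reacting to an out-of-band
 * event, such as a server-side unlink, can drop the cached entry so
 * that the next lookup misses:
 *
 *	if (entry_is_stale(dentry))	// hypothetical predicate
 *		d_drop(dentry);
 */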
511  
512  static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
513  {
514  	struct dentry *next;
515  	/*
516  	 * Inform d_walk() and shrink_dentry_list() that we are no longer
517  	 * attached to the dentry tree
518  	 */
519  	dentry->d_flags |= DCACHE_DENTRY_KILLED;
520  	if (unlikely(list_empty(&dentry->d_child)))
521  		return;
522  	__list_del_entry(&dentry->d_child);
523  	/*
524  	 * Cursors can move around the list of children.  While we'd been
525  	 * a normal list member, it didn't matter - ->d_child.next would've
526  	 * been updated.  However, from now on it won't be and for the
527  	 * things like d_walk() it might end up with a nasty surprise.
528  	 * Normally d_walk() doesn't care about cursors moving around -
529  	 * ->d_lock on parent prevents that and since a cursor has no children
530  	 * of its own, we get through it without ever unlocking the parent.
531  	 * There is one exception, though - if we ascend from a child that
532  	 * gets killed as soon as we unlock it, the next sibling is found
533  	 * using the value left in its ->d_child.next.  And if _that_
534  	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
535  	 * before d_walk() regains parent->d_lock, we'll end up skipping
536  	 * everything the cursor had been moved past.
537  	 *
538  	 * Solution: make sure that the pointer left behind in ->d_child.next
539  	 * points to something that won't be moving around.  I.e. skip the
540  	 * cursors.
541  	 */
542  	while (dentry->d_child.next != &parent->d_subdirs) {
543  		next = list_entry(dentry->d_child.next, struct dentry, d_child);
544  		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
545  			break;
546  		dentry->d_child.next = next->d_child.next;
547  	}
548  }
549  
550  static void __dentry_kill(struct dentry *dentry)
551  {
552  	struct dentry *parent = NULL;
553  	bool can_free = true;
554  	if (!IS_ROOT(dentry))
555  		parent = dentry->d_parent;
556  
557  	/*
558  	 * The dentry is now unrecoverably dead to the world.
559  	 */
560  	lockref_mark_dead(&dentry->d_lockref);
561  
562  	/*
563  	 * inform the fs via d_prune that this dentry is about to be
564  	 * unhashed and destroyed.
565  	 */
566  	if (dentry->d_flags & DCACHE_OP_PRUNE)
567  		dentry->d_op->d_prune(dentry);
568  
569  	if (dentry->d_flags & DCACHE_LRU_LIST) {
570  		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
571  			d_lru_del(dentry);
572  	}
573  	/* if it was on the hash then remove it */
574  	__d_drop(dentry);
575  	dentry_unlist(dentry, parent);
576  	if (parent)
577  		spin_unlock(&parent->d_lock);
578  	if (dentry->d_inode)
579  		dentry_unlink_inode(dentry);
580  	else
581  		spin_unlock(&dentry->d_lock);
582  	this_cpu_dec(nr_dentry);
583  	if (dentry->d_op && dentry->d_op->d_release)
584  		dentry->d_op->d_release(dentry);
585  
586  	spin_lock(&dentry->d_lock);
587  	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
588  		dentry->d_flags |= DCACHE_MAY_FREE;
589  		can_free = false;
590  	}
591  	spin_unlock(&dentry->d_lock);
592  	if (likely(can_free))
593  		dentry_free(dentry);
594  	cond_resched();
595  }
596  
597  static struct dentry *__lock_parent(struct dentry *dentry)
598  {
599  	struct dentry *parent;
600  	rcu_read_lock();
601  	spin_unlock(&dentry->d_lock);
602  again:
603  	parent = READ_ONCE(dentry->d_parent);
604  	spin_lock(&parent->d_lock);
605  	/*
606  	 * We can't blindly lock dentry until we are sure
607  	 * that we won't violate the locking order.
608  	 * Any changes of dentry->d_parent must have
609  	 * been done with parent->d_lock held, so
610  	 * spin_lock() above is enough of a barrier
611  	 * for checking if it's still our child.
612  	 */
613  	if (unlikely(parent != dentry->d_parent)) {
614  		spin_unlock(&parent->d_lock);
615  		goto again;
616  	}
617  	rcu_read_unlock();
618  	if (parent != dentry)
619  		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
620  	else
621  		parent = NULL;
622  	return parent;
623  }
624  
625  static inline struct dentry *lock_parent(struct dentry *dentry)
626  {
627  	struct dentry *parent = dentry->d_parent;
628  	if (IS_ROOT(dentry))
629  		return NULL;
630  	if (likely(spin_trylock(&parent->d_lock)))
631  		return parent;
632  	return __lock_parent(dentry);
633  }
634  
635  static inline bool retain_dentry(struct dentry *dentry)
636  {
637  	WARN_ON(d_in_lookup(dentry));
638  
639  	/* Unreachable? Get rid of it */
640  	if (unlikely(d_unhashed(dentry)))
641  		return false;
642  
643  	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
644  		return false;
645  
646  	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
647  		if (dentry->d_op->d_delete(dentry))
648  			return false;
649  	}
650  
651  	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
652  		return false;
653  
654  	/* retain; LRU fodder */
655  	dentry->d_lockref.count--;
656  	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
657  		d_lru_add(dentry);
658  	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
659  		dentry->d_flags |= DCACHE_REFERENCED;
660  	return true;
661  }
662  
663  void d_mark_dontcache(struct inode *inode)
664  {
665  	struct dentry *de;
666  
667  	spin_lock(&inode->i_lock);
668  	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
669  		spin_lock(&de->d_lock);
670  		de->d_flags |= DCACHE_DONTCACHE;
671  		spin_unlock(&de->d_lock);
672  	}
673  	inode->i_state |= I_DONTCACHE;
674  	spin_unlock(&inode->i_lock);
675  }
676  EXPORT_SYMBOL(d_mark_dontcache);
677  
678  /*
679   * Finish off a dentry we've decided to kill.
680   * dentry->d_lock must be held, returns with it unlocked.
681   * Returns dentry requiring refcount drop, or NULL if we're done.
682   */
683  static struct dentry *dentry_kill(struct dentry *dentry)
684  	__releases(dentry->d_lock)
685  {
686  	struct inode *inode = dentry->d_inode;
687  	struct dentry *parent = NULL;
688  
689  	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
690  		goto slow_positive;
691  
692  	if (!IS_ROOT(dentry)) {
693  		parent = dentry->d_parent;
694  		if (unlikely(!spin_trylock(&parent->d_lock))) {
695  			parent = __lock_parent(dentry);
696  			if (likely(inode || !dentry->d_inode))
697  				goto got_locks;
698  			/* negative that became positive */
699  			if (parent)
700  				spin_unlock(&parent->d_lock);
701  			inode = dentry->d_inode;
702  			goto slow_positive;
703  		}
704  	}
705  	__dentry_kill(dentry);
706  	return parent;
707  
708  slow_positive:
709  	spin_unlock(&dentry->d_lock);
710  	spin_lock(&inode->i_lock);
711  	spin_lock(&dentry->d_lock);
712  	parent = lock_parent(dentry);
713  got_locks:
714  	if (unlikely(dentry->d_lockref.count != 1)) {
715  		dentry->d_lockref.count--;
716  	} else if (likely(!retain_dentry(dentry))) {
717  		__dentry_kill(dentry);
718  		return parent;
719  	}
720  	/* we are keeping it, after all */
721  	if (inode)
722  		spin_unlock(&inode->i_lock);
723  	if (parent)
724  		spin_unlock(&parent->d_lock);
725  	spin_unlock(&dentry->d_lock);
726  	return NULL;
727  }
728  
729  /*
730   * Try to do a lockless dput(), and return whether that was successful.
731   *
732   * If unsuccessful, we return false, having already taken the dentry lock.
733   *
734   * The caller needs to hold the RCU read lock, so that the dentry is
735   * guaranteed to stay around even if the refcount goes down to zero!
736   */
737  static inline bool fast_dput(struct dentry *dentry)
738  {
739  	int ret;
740  	unsigned int d_flags;
741  
742  	/*
743  	 * If we have a d_op->d_delete() operation, we should not
744  	 * let the dentry count go to zero, so use "put_or_lock".
745  	 */
746  	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
747  		return lockref_put_or_lock(&dentry->d_lockref);
748  
749  	/*
750  	 * .. otherwise, we can try to just decrement the
751  	 * lockref optimistically.
752  	 */
753  	ret = lockref_put_return(&dentry->d_lockref);
754  
755  	/*
756  	 * If the lockref_put_return() failed due to the lock being held
757  	 * by somebody else, the fast path has failed. We will need to
758  	 * get the lock, and then check the count again.
759  	 */
760  	if (unlikely(ret < 0)) {
761  		spin_lock(&dentry->d_lock);
762  		if (dentry->d_lockref.count > 1) {
763  			dentry->d_lockref.count--;
764  			spin_unlock(&dentry->d_lock);
765  			return true;
766  		}
767  		return false;
768  	}
769  
770  	/*
771  	 * If we weren't the last ref, we're done.
772  	 */
773  	if (ret)
774  		return true;
775  
776  	/*
777  	 * Careful, careful. The reference count went down
778  	 * to zero, but we don't hold the dentry lock, so
779  	 * somebody else could get it again, and do another
780  	 * dput(), and we need to not race with that.
781  	 *
782  	 * However, there is a very special and common case
783  	 * where we don't care, because there is nothing to
784  	 * do: the dentry is still hashed, it does not have
785  	 * a 'delete' op, and it's referenced and already on
786  	 * the LRU list.
787  	 *
788  	 * NOTE! Since we aren't locked, these values are
789  	 * not "stable". However, it is sufficient that at
790  	 * some point after we dropped the reference the
791  	 * dentry was hashed and the flags had the proper
792  	 * value. Other dentry users may have re-gotten
793  	 * a reference to the dentry and change that, but
794  	 * our work is done - we can leave the dentry
795  	 * around with a zero refcount.
796  	 */
797  	smp_rmb();
798  	d_flags = READ_ONCE(dentry->d_flags);
799  	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
800  
801  	/* Nothing to do? Dropping the reference was all we needed? */
802  	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
803  		return true;
804  
805  	/*
806  	 * Not the fast normal case? Get the lock. We've already decremented
807  	 * the refcount, but we'll need to re-check the situation after
808  	 * getting the lock.
809  	 */
810  	spin_lock(&dentry->d_lock);
811  
812  	/*
813  	 * Did somebody else grab a reference to it in the meantime, and
814  	 * we're no longer the last user after all? Alternatively, somebody
815  	 * else could have killed it and marked it dead. Either way, we
816  	 * don't need to do anything else.
817  	 */
818  	if (dentry->d_lockref.count) {
819  		spin_unlock(&dentry->d_lock);
820  		return true;
821  	}
822  
823  	/*
824  	 * Re-get the reference we optimistically dropped. We hold the
825  	 * lock, and we just tested that it was zero, so we can just
826  	 * set it to 1.
827  	 */
828  	dentry->d_lockref.count = 1;
829  	return false;
830  }
831  
832  
833  /*
834   * This is dput
835   *
836   * This is complicated by the fact that we do not want to put
837   * dentries that are no longer on any hash chain on the unused
838   * list: we'd much rather just get rid of them immediately.
839   *
840   * However, that implies that we have to traverse the dentry
841   * tree upwards to the parents which might _also_ now be
842   * scheduled for deletion (it may have been only waiting for
843   * its last child to go away).
844   *
845   * This tail recursion is done by hand as we don't want to depend
846   * on the compiler to always get this right (gcc generally doesn't).
847   * Real recursion would eat up our stack space.
848   */
849  
850  /*
851   * dput - release a dentry
852   * @dentry: dentry to release
853   *
854   * Release a dentry. This will drop the usage count and if appropriate
855   * call the dentry unlink method as well as removing it from the queues and
856   * releasing its resources. If the parent dentries were scheduled for release
857   * they too may now get deleted.
858   */
859  void dput(struct dentry *dentry)
860  {
861  	while (dentry) {
862  		might_sleep();
863  
864  		rcu_read_lock();
865  		if (likely(fast_dput(dentry))) {
866  			rcu_read_unlock();
867  			return;
868  		}
869  
870  		/* Slow case: now with the dentry lock held */
871  		rcu_read_unlock();
872  
873  		if (likely(retain_dentry(dentry))) {
874  			spin_unlock(&dentry->d_lock);
875  			return;
876  		}
877  
878  		dentry = dentry_kill(dentry);
879  	}
880  }
881  EXPORT_SYMBOL(dput);
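
/*
 * Example (illustrative): dput() pairs with whatever took the
 * reference - dget(), d_find_alias(), dget_parent() and friends:
 *
 *	struct dentry *d = dget(some_dentry);
 *
 *	... use d; it cannot be freed meanwhile ...
 *	dput(d);	// may sleep, so never call it in atomic context
 */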
882  
883  static void __dput_to_list(struct dentry *dentry, struct list_head *list)
884  __must_hold(&dentry->d_lock)
885  {
886  	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
887  		/* let the owner of the list it's on deal with it */
888  		--dentry->d_lockref.count;
889  	} else {
890  		if (dentry->d_flags & DCACHE_LRU_LIST)
891  			d_lru_del(dentry);
892  		if (!--dentry->d_lockref.count)
893  			d_shrink_add(dentry, list);
894  	}
895  }
896  
897  void dput_to_list(struct dentry *dentry, struct list_head *list)
898  {
899  	rcu_read_lock();
900  	if (likely(fast_dput(dentry))) {
901  		rcu_read_unlock();
902  		return;
903  	}
904  	rcu_read_unlock();
905  	if (!retain_dentry(dentry))
906  		__dput_to_list(dentry, list);
907  	spin_unlock(&dentry->d_lock);
908  }
909  
910  /* This must be called with d_lock held */
911  static inline void __dget_dlock(struct dentry *dentry)
912  {
913  	dentry->d_lockref.count++;
914  }
915  
916  static inline void __dget(struct dentry *dentry)
917  {
918  	lockref_get(&dentry->d_lockref);
919  }
920  
921  struct dentry *dget_parent(struct dentry *dentry)
922  {
923  	int gotref;
924  	struct dentry *ret;
925  	unsigned seq;
926  
927  	/*
928  	 * Do optimistic parent lookup without any
929  	 * locking.
930  	 */
931  	rcu_read_lock();
932  	seq = raw_seqcount_begin(&dentry->d_seq);
933  	ret = READ_ONCE(dentry->d_parent);
934  	gotref = lockref_get_not_zero(&ret->d_lockref);
935  	rcu_read_unlock();
936  	if (likely(gotref)) {
937  		if (!read_seqcount_retry(&dentry->d_seq, seq))
938  			return ret;
939  		dput(ret);
940  	}
941  
942  repeat:
943  	/*
944  	 * Don't need rcu_dereference because we re-check it was correct under
945  	 * the lock.
946  	 */
947  	rcu_read_lock();
948  	ret = dentry->d_parent;
949  	spin_lock(&ret->d_lock);
950  	if (unlikely(ret != dentry->d_parent)) {
951  		spin_unlock(&ret->d_lock);
952  		rcu_read_unlock();
953  		goto repeat;
954  	}
955  	rcu_read_unlock();
956  	BUG_ON(!ret->d_lockref.count);
957  	ret->d_lockref.count++;
958  	spin_unlock(&ret->d_lock);
959  	return ret;
960  }
961  EXPORT_SYMBOL(dget_parent);
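
/*
 * Example (illustrative): the rename-safe way to step one level up,
 * as opposed to dereferencing ->d_parent directly:
 *
 *	struct dentry *parent = dget_parent(dentry);
 *
 *	... parent is pinned and was ->d_parent at some point ...
 *	dput(parent);
 */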
962  
963  static struct dentry * __d_find_any_alias(struct inode *inode)
964  {
965  	struct dentry *alias;
966  
967  	if (hlist_empty(&inode->i_dentry))
968  		return NULL;
969  	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
970  	__dget(alias);
971  	return alias;
972  }
973  
974  /**
975   * d_find_any_alias - find any alias for a given inode
976   * @inode: inode to find an alias for
977   *
978   * If any aliases exist for the given inode, take and return a
979   * reference for one of them.  If no aliases exist, return %NULL.
980   */
981  struct dentry *d_find_any_alias(struct inode *inode)
982  {
983  	struct dentry *de;
984  
985  	spin_lock(&inode->i_lock);
986  	de = __d_find_any_alias(inode);
987  	spin_unlock(&inode->i_lock);
988  	return de;
989  }
990  EXPORT_SYMBOL(d_find_any_alias);
991  
992  /**
993   * d_find_alias - grab a hashed alias of inode
994   * @inode: inode in question
995   *
996   * If inode has a hashed alias, or is a directory and has any alias,
997   * acquire the reference to alias and return it. Otherwise return NULL.
998   * Notice that if inode is a directory there can be only one alias and
999   * it can be unhashed only if it has no children, or if it is the root
1000   * of a filesystem, or if the directory was renamed and d_revalidate
1001   * was the first vfs operation to notice.
1002   *
1003   * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
1004   * any other hashed alias over that one.
1005   */
1006  static struct dentry *__d_find_alias(struct inode *inode)
1007  {
1008  	struct dentry *alias;
1009  
1010  	if (S_ISDIR(inode->i_mode))
1011  		return __d_find_any_alias(inode);
1012  
1013  	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
1014  		spin_lock(&alias->d_lock);
1015  		if (!d_unhashed(alias)) {
1016  			__dget_dlock(alias);
1017  			spin_unlock(&alias->d_lock);
1018  			return alias;
1019  		}
1020  		spin_unlock(&alias->d_lock);
1021  	}
1022  	return NULL;
1023  }
1024  
1025  struct dentry *d_find_alias(struct inode *inode)
1026  {
1027  	struct dentry *de = NULL;
1028  
1029  	if (!hlist_empty(&inode->i_dentry)) {
1030  		spin_lock(&inode->i_lock);
1031  		de = __d_find_alias(inode);
1032  		spin_unlock(&inode->i_lock);
1033  	}
1034  	return de;
1035  }
1036  EXPORT_SYMBOL(d_find_alias);
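
/*
 * Example (illustrative): recovering a hashed dentry when only the
 * inode is at hand:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *
 *	if (alias) {
 *		... use alias ...
 *		dput(alias);	// balance the reference taken above
 *	}
 */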
1037  
1038  /*
1039   *	Try to kill dentries associated with this inode.
1040   * WARNING: you must own a reference to inode.
1041   */
1042  void d_prune_aliases(struct inode *inode)
1043  {
1044  	struct dentry *dentry;
1045  restart:
1046  	spin_lock(&inode->i_lock);
1047  	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
1048  		spin_lock(&dentry->d_lock);
1049  		if (!dentry->d_lockref.count) {
1050  			struct dentry *parent = lock_parent(dentry);
1051  			if (likely(!dentry->d_lockref.count)) {
1052  				__dentry_kill(dentry);
1053  				dput(parent);
1054  				goto restart;
1055  			}
1056  			if (parent)
1057  				spin_unlock(&parent->d_lock);
1058  		}
1059  		spin_unlock(&dentry->d_lock);
1060  	}
1061  	spin_unlock(&inode->i_lock);
1062  }
1063  EXPORT_SYMBOL(d_prune_aliases);
1064  
1065  /*
1066   * Lock a dentry from shrink list.
1067   * Called under rcu_read_lock() and dentry->d_lock; the former
1068   * guarantees that nothing we access will be freed under us.
1069   * Note that dentry is *not* protected from concurrent dentry_kill(),
1070   * d_delete(), etc.
1071   *
1072   * Return false if dentry has been disrupted or grabbed, leaving
1073   * the caller to kick it off-list.  Otherwise, return true and have
1074   * that dentry's inode and parent both locked.
1075   */
1076  static bool shrink_lock_dentry(struct dentry *dentry)
1077  {
1078  	struct inode *inode;
1079  	struct dentry *parent;
1080  
1081  	if (dentry->d_lockref.count)
1082  		return false;
1083  
1084  	inode = dentry->d_inode;
1085  	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1086  		spin_unlock(&dentry->d_lock);
1087  		spin_lock(&inode->i_lock);
1088  		spin_lock(&dentry->d_lock);
1089  		if (unlikely(dentry->d_lockref.count))
1090  			goto out;
1091  		/* changed inode means that somebody had grabbed it */
1092  		if (unlikely(inode != dentry->d_inode))
1093  			goto out;
1094  	}
1095  
1096  	parent = dentry->d_parent;
1097  	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
1098  		return true;
1099  
1100  	spin_unlock(&dentry->d_lock);
1101  	spin_lock(&parent->d_lock);
1102  	if (unlikely(parent != dentry->d_parent)) {
1103  		spin_unlock(&parent->d_lock);
1104  		spin_lock(&dentry->d_lock);
1105  		goto out;
1106  	}
1107  	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1108  	if (likely(!dentry->d_lockref.count))
1109  		return true;
1110  	spin_unlock(&parent->d_lock);
1111  out:
1112  	if (inode)
1113  		spin_unlock(&inode->i_lock);
1114  	return false;
1115  }
1116  
1117  void shrink_dentry_list(struct list_head *list)
1118  {
1119  	while (!list_empty(list)) {
1120  		struct dentry *dentry, *parent;
1121  
1122  		dentry = list_entry(list->prev, struct dentry, d_lru);
1123  		spin_lock(&dentry->d_lock);
1124  		rcu_read_lock();
1125  		if (!shrink_lock_dentry(dentry)) {
1126  			bool can_free = false;
1127  			rcu_read_unlock();
1128  			d_shrink_del(dentry);
1129  			if (dentry->d_lockref.count < 0)
1130  				can_free = dentry->d_flags & DCACHE_MAY_FREE;
1131  			spin_unlock(&dentry->d_lock);
1132  			if (can_free)
1133  				dentry_free(dentry);
1134  			continue;
1135  		}
1136  		rcu_read_unlock();
1137  		d_shrink_del(dentry);
1138  		parent = dentry->d_parent;
1139  		if (parent != dentry)
1140  			__dput_to_list(parent, list);
1141  		__dentry_kill(dentry);
1142  	}
1143  }
1144  
1145  static enum lru_status dentry_lru_isolate(struct list_head *item,
1146  		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1147  {
1148  	struct list_head *freeable = arg;
1149  	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1150  
1151  
1152  	/*
1153  	 * we are inverting the lru lock/dentry->d_lock here,
1154  	 * so use a trylock. If we fail to get the lock, just skip
1155  	 * it
1156  	 */
1157  	if (!spin_trylock(&dentry->d_lock))
1158  		return LRU_SKIP;
1159  
1160  	/*
1161  	 * Referenced dentries are still in use. If they have active
1162  	 * counts, just remove them from the LRU. Otherwise give them
1163  	 * another pass through the LRU.
1164  	 */
1165  	if (dentry->d_lockref.count) {
1166  		d_lru_isolate(lru, dentry);
1167  		spin_unlock(&dentry->d_lock);
1168  		return LRU_REMOVED;
1169  	}
1170  
1171  	if (dentry->d_flags & DCACHE_REFERENCED) {
1172  		dentry->d_flags &= ~DCACHE_REFERENCED;
1173  		spin_unlock(&dentry->d_lock);
1174  
1175  		/*
1176  		 * The list move itself will be made by the common LRU code. At
1177  		 * this point, we've dropped the dentry->d_lock but keep the
1178  		 * lru lock. This is safe to do, since every list movement is
1179  		 * protected by the lru lock even if both locks are held.
1180  		 *
1181  		 * This is guaranteed by the fact that all LRU management
1182  		 * functions are intermediated by the LRU API calls like
1183  		 * list_lru_add and list_lru_del. List movement in this file
1184  		 * only ever occurs through these functions or through callbacks
1185  		 * like this one, that are called from the LRU API.
1186  		 *
1187  		 * The only exceptions to this are functions like
1188  		 * shrink_dentry_list, and code that first checks for the
1189  		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1190  		 * operating only with stack provided lists after they are
1191  		 * properly isolated from the main list.  It is thus, always a
1192  		 * local access.
1193  		 */
1194  		return LRU_ROTATE;
1195  	}
1196  
1197  	d_lru_shrink_move(lru, dentry, freeable);
1198  	spin_unlock(&dentry->d_lock);
1199  
1200  	return LRU_REMOVED;
1201  }
1202  
1203  /**
1204   * prune_dcache_sb - shrink the dcache
1205   * @sb: superblock
1206   * @sc: shrink control, passed to list_lru_shrink_walk()
1207   *
1208   * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1209   * is done when we need more memory and called from the superblock shrinker
1210   * function.
1211   *
1212   * This function may fail to free any resources if all the dentries are in
1213   * use.
1214   */
1215  long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1216  {
1217  	LIST_HEAD(dispose);
1218  	long freed;
1219  
1220  	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1221  				     dentry_lru_isolate, &dispose);
1222  	shrink_dentry_list(&dispose);
1223  	return freed;
1224  }
1225  
1226  static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1227  		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1228  {
1229  	struct list_head *freeable = arg;
1230  	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
1231  
1232  	/*
1233  	 * we are inverting the lru lock/dentry->d_lock here,
1234  	 * so use a trylock. If we fail to get the lock, just skip
1235  	 * it
1236  	 */
1237  	if (!spin_trylock(&dentry->d_lock))
1238  		return LRU_SKIP;
1239  
1240  	d_lru_shrink_move(lru, dentry, freeable);
1241  	spin_unlock(&dentry->d_lock);
1242  
1243  	return LRU_REMOVED;
1244  }
1245  
1246  
1247  /**
1248   * shrink_dcache_sb - shrink dcache for a superblock
1249   * @sb: superblock
1250   *
1251   * Shrink the dcache for the specified super block. This is used to free
1252   * the dcache before unmounting a file system.
1253   */
1254  void shrink_dcache_sb(struct super_block *sb)
1255  {
1256  	do {
1257  		LIST_HEAD(dispose);
1258  
1259  		list_lru_walk(&sb->s_dentry_lru,
1260  			dentry_lru_isolate_shrink, &dispose, 1024);
1261  		shrink_dentry_list(&dispose);
1262  	} while (list_lru_count(&sb->s_dentry_lru) > 0);
1263  }
1264  EXPORT_SYMBOL(shrink_dcache_sb);
1265  
1266  /**
1267   * enum d_walk_ret - action to take during tree walk
1268   * @D_WALK_CONTINUE:	continue walk
1269   * @D_WALK_QUIT:	quit walk
1270   * @D_WALK_NORETRY:	quit when retry is needed
1271   * @D_WALK_SKIP:	skip this dentry and its children
1272   */
1273  enum d_walk_ret {
1274  	D_WALK_CONTINUE,
1275  	D_WALK_QUIT,
1276  	D_WALK_NORETRY,
1277  	D_WALK_SKIP,
1278  };
1279  
1280  /**
1281   * d_walk - walk the dentry tree
1282   * @parent:	start of walk
1283   * @data:	data passed to @enter() and @finish()
1284   * @enter:	callback when first entering the dentry
1285   *
1286   * The @enter() callbacks are called with d_lock held.
1287   */
1288  static void d_walk(struct dentry *parent, void *data,
1289  		   enum d_walk_ret (*enter)(void *, struct dentry *))
1290  {
1291  	struct dentry *this_parent;
1292  	struct list_head *next;
1293  	unsigned seq = 0;
1294  	enum d_walk_ret ret;
1295  	bool retry = true;
1296  
1297  again:
1298  	read_seqbegin_or_lock(&rename_lock, &seq);
1299  	this_parent = parent;
1300  	spin_lock(&this_parent->d_lock);
1301  
1302  	ret = enter(data, this_parent);
1303  	switch (ret) {
1304  	case D_WALK_CONTINUE:
1305  		break;
1306  	case D_WALK_QUIT:
1307  	case D_WALK_SKIP:
1308  		goto out_unlock;
1309  	case D_WALK_NORETRY:
1310  		retry = false;
1311  		break;
1312  	}
1313  repeat:
1314  	next = this_parent->d_subdirs.next;
1315  resume:
1316  	while (next != &this_parent->d_subdirs) {
1317  		struct list_head *tmp = next;
1318  		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1319  		next = tmp->next;
1320  
1321  		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1322  			continue;
1323  
1324  		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1325  
1326  		ret = enter(data, dentry);
1327  		switch (ret) {
1328  		case D_WALK_CONTINUE:
1329  			break;
1330  		case D_WALK_QUIT:
1331  			spin_unlock(&dentry->d_lock);
1332  			goto out_unlock;
1333  		case D_WALK_NORETRY:
1334  			retry = false;
1335  			break;
1336  		case D_WALK_SKIP:
1337  			spin_unlock(&dentry->d_lock);
1338  			continue;
1339  		}
1340  
1341  		if (!list_empty(&dentry->d_subdirs)) {
1342  			spin_unlock(&this_parent->d_lock);
1343  			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
1344  			this_parent = dentry;
1345  			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1346  			goto repeat;
1347  		}
1348  		spin_unlock(&dentry->d_lock);
1349  	}
1350  	/*
1351  	 * All done at this level ... ascend and resume the search.
1352  	 */
1353  	rcu_read_lock();
1354  ascend:
1355  	if (this_parent != parent) {
1356  		struct dentry *child = this_parent;
1357  		this_parent = child->d_parent;
1358  
1359  		spin_unlock(&child->d_lock);
1360  		spin_lock(&this_parent->d_lock);
1361  
1362  		/* might go back up the wrong parent if we have had a rename. */
1363  		if (need_seqretry(&rename_lock, seq))
1364  			goto rename_retry;
1365  		/* go into the first sibling still alive */
1366  		do {
1367  			next = child->d_child.next;
1368  			if (next == &this_parent->d_subdirs)
1369  				goto ascend;
1370  			child = list_entry(next, struct dentry, d_child);
1371  		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1372  		rcu_read_unlock();
1373  		goto resume;
1374  	}
1375  	if (need_seqretry(&rename_lock, seq))
1376  		goto rename_retry;
1377  	rcu_read_unlock();
1378  
1379  out_unlock:
1380  	spin_unlock(&this_parent->d_lock);
1381  	done_seqretry(&rename_lock, seq);
1382  	return;
1383  
1384  rename_retry:
1385  	spin_unlock(&this_parent->d_lock);
1386  	rcu_read_unlock();
1387  	BUG_ON(seq & 1);
1388  	if (!retry)
1389  		return;
1390  	seq = 1;
1391  	goto again;
1392  }
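
/*
 * Example (illustrative sketch): a minimal @enter callback that counts
 * the dentries under a root.  Callbacks run with d_lock held and must
 * not sleep:
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *d)
 *	{
 *		(*(long *)data)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	long n = 0;
 *	d_walk(root, &n, count_one);
 */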
1393  
1394  struct check_mount {
1395  	struct vfsmount *mnt;
1396  	unsigned int mounted;
1397  };
1398  
1399  static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1400  {
1401  	struct check_mount *info = data;
1402  	struct path path = { .mnt = info->mnt, .dentry = dentry };
1403  
1404  	if (likely(!d_mountpoint(dentry)))
1405  		return D_WALK_CONTINUE;
1406  	if (__path_is_mountpoint(&path)) {
1407  		info->mounted = 1;
1408  		return D_WALK_QUIT;
1409  	}
1410  	return D_WALK_CONTINUE;
1411  }
1412  
1413  /**
1414   * path_has_submounts - check for mounts over a dentry in the
1415   *                      current namespace.
1416   * @parent: path to check.
1417   *
1418   * Return true if the parent or its subdirectories contain
1419   * a mount point in the current namespace.
1420   */
1421  int path_has_submounts(const struct path *parent)
1422  {
1423  	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1424  
1425  	read_seqlock_excl(&mount_lock);
1426  	d_walk(parent->dentry, &data, path_check_mount);
1427  	read_sequnlock_excl(&mount_lock);
1428  
1429  	return data.mounted;
1430  }
1431  EXPORT_SYMBOL(path_has_submounts);
1432  
1433  /*
1434   * Called by mount code to set a mountpoint and check if the mountpoint is
1435   * reachable (e.g. NFS can unhash a directory dentry and then the complete
1436   * subtree can become unreachable).
1437   *
1438   * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1439   * this reason take rename_lock and d_lock on dentry and ancestors.
1440   */
1441  int d_set_mounted(struct dentry *dentry)
1442  {
1443  	struct dentry *p;
1444  	int ret = -ENOENT;
1445  	write_seqlock(&rename_lock);
1446  	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1447  		/* Need exclusion wrt. d_invalidate() */
1448  		spin_lock(&p->d_lock);
1449  		if (unlikely(d_unhashed(p))) {
1450  			spin_unlock(&p->d_lock);
1451  			goto out;
1452  		}
1453  		spin_unlock(&p->d_lock);
1454  	}
1455  	spin_lock(&dentry->d_lock);
1456  	if (!d_unlinked(dentry)) {
1457  		ret = -EBUSY;
1458  		if (!d_mountpoint(dentry)) {
1459  			dentry->d_flags |= DCACHE_MOUNTED;
1460  			ret = 0;
1461  		}
1462  	}
1463  	spin_unlock(&dentry->d_lock);
1464  out:
1465  	write_sequnlock(&rename_lock);
1466  	return ret;
1467  }
1468  
1469  /*
1470   * Search the dentry child list of the specified parent,
1471   * and move any unused dentries to the end of the unused
1472   * list for prune_dcache(). We descend to the next level
1473   * whenever the d_subdirs list is non-empty and continue
1474   * searching.
1475   *
1476   * It returns zero iff there are no unused children,
1477   * otherwise it returns the number of children moved to
1478   * the end of the unused list. This may not be the total
1479   * number of unused children, because select_collect() can
1480   * drop the lock and return early due to latency
1481   * constraints.
1482   */
1483  
1484  struct select_data {
1485  	struct dentry *start;
1486  	union {
1487  		long found;
1488  		struct dentry *victim;
1489  	};
1490  	struct list_head dispose;
1491  };
1492  
1493  static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1494  {
1495  	struct select_data *data = _data;
1496  	enum d_walk_ret ret = D_WALK_CONTINUE;
1497  
1498  	if (data->start == dentry)
1499  		goto out;
1500  
1501  	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1502  		data->found++;
1503  	} else {
1504  		if (dentry->d_flags & DCACHE_LRU_LIST)
1505  			d_lru_del(dentry);
1506  		if (!dentry->d_lockref.count) {
1507  			d_shrink_add(dentry, &data->dispose);
1508  			data->found++;
1509  		}
1510  	}
1511  	/*
1512  	 * We can return to the caller if we have found some (this
1513  	 * ensures forward progress). We'll be coming back to find
1514  	 * the rest.
1515  	 */
1516  	if (!list_empty(&data->dispose))
1517  		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1518  out:
1519  	return ret;
1520  }
1521  
1522  static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
1523  {
1524  	struct select_data *data = _data;
1525  	enum d_walk_ret ret = D_WALK_CONTINUE;
1526  
1527  	if (data->start == dentry)
1528  		goto out;
1529  
1530  	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1531  		if (!dentry->d_lockref.count) {
1532  			rcu_read_lock();
1533  			data->victim = dentry;
1534  			return D_WALK_QUIT;
1535  		}
1536  	} else {
1537  		if (dentry->d_flags & DCACHE_LRU_LIST)
1538  			d_lru_del(dentry);
1539  		if (!dentry->d_lockref.count)
1540  			d_shrink_add(dentry, &data->dispose);
1541  	}
1542  	/*
1543  	 * We can return to the caller if we have found some (this
1544  	 * ensures forward progress). We'll be coming back to find
1545  	 * the rest.
1546  	 */
1547  	if (!list_empty(&data->dispose))
1548  		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1549  out:
1550  	return ret;
1551  }
1552  
1553  /**
1554   * shrink_dcache_parent - prune dcache
1555   * @parent: parent of entries to prune
1556   *
1557   * Prune the dcache to remove unused children of the parent dentry.
1558   */
1559  void shrink_dcache_parent(struct dentry *parent)
1560  {
1561  	for (;;) {
1562  		struct select_data data = {.start = parent};
1563  
1564  		INIT_LIST_HEAD(&data.dispose);
1565  		d_walk(parent, &data, select_collect);
1566  
1567  		if (!list_empty(&data.dispose)) {
1568  			shrink_dentry_list(&data.dispose);
1569  			continue;
1570  		}
1571  
1572  		cond_resched();
1573  		if (!data.found)
1574  			break;
1575  		data.victim = NULL;
1576  		d_walk(parent, &data, select_collect2);
1577  		if (data.victim) {
1578  			struct dentry *parent;
1579  			spin_lock(&data.victim->d_lock);
1580  			if (!shrink_lock_dentry(data.victim)) {
1581  				spin_unlock(&data.victim->d_lock);
1582  				rcu_read_unlock();
1583  			} else {
1584  				rcu_read_unlock();
1585  				parent = data.victim->d_parent;
1586  				if (parent != data.victim)
1587  					__dput_to_list(parent, &data.dispose);
1588  				__dentry_kill(data.victim);
1589  			}
1590  		}
1591  		if (!list_empty(&data.dispose))
1592  			shrink_dentry_list(&data.dispose);
1593  	}
1594  }
1595  EXPORT_SYMBOL(shrink_dcache_parent);
1596  
1597  static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1598  {
1599  	/* it has busy descendants; complain about those instead */
1600  	if (!list_empty(&dentry->d_subdirs))
1601  		return D_WALK_CONTINUE;
1602  
1603  	/* root with refcount 1 is fine */
1604  	if (dentry == _data && dentry->d_lockref.count == 1)
1605  		return D_WALK_CONTINUE;
1606  
1607  	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1608  			"still in use (%d) [unmount of %s %s]\n",
1609  		       dentry,
1610  		       dentry->d_inode ?
1611  		       dentry->d_inode->i_ino : 0UL,
1612  		       dentry,
1613  		       dentry->d_lockref.count,
1614  		       dentry->d_sb->s_type->name,
1615  		       dentry->d_sb->s_id);
1616  	WARN_ON(1);
1617  	return D_WALK_CONTINUE;
1618  }
1619  
1620  static void do_one_tree(struct dentry *dentry)
1621  {
1622  	shrink_dcache_parent(dentry);
1623  	d_walk(dentry, dentry, umount_check);
1624  	d_drop(dentry);
1625  	dput(dentry);
1626  }
1627  
1628  /*
1629   * destroy the dentries attached to a superblock on unmounting
1630   */
1631  void shrink_dcache_for_umount(struct super_block *sb)
1632  {
1633  	struct dentry *dentry;
1634  
1635  	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1636  
1637  	dentry = sb->s_root;
1638  	sb->s_root = NULL;
1639  	do_one_tree(dentry);
1640  
1641  	while (!hlist_bl_empty(&sb->s_roots)) {
1642  		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1643  		do_one_tree(dentry);
1644  	}
1645  }
1646  
1647  static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1648  {
1649  	struct dentry **victim = _data;
1650  	if (d_mountpoint(dentry)) {
1651  		__dget_dlock(dentry);
1652  		*victim = dentry;
1653  		return D_WALK_QUIT;
1654  	}
1655  	return D_WALK_CONTINUE;
1656  }
1657  
1658  /**
1659   * d_invalidate - detach submounts, prune dcache, and drop
1660   * @dentry: dentry to invalidate (aka detach, prune and drop)
1661   */
1662  void d_invalidate(struct dentry *dentry)
1663  {
1664  	bool had_submounts = false;
1665  	spin_lock(&dentry->d_lock);
1666  	if (d_unhashed(dentry)) {
1667  		spin_unlock(&dentry->d_lock);
1668  		return;
1669  	}
1670  	__d_drop(dentry);
1671  	spin_unlock(&dentry->d_lock);
1672  
1673  	/* Negative dentries can be dropped without further checks */
1674  	if (!dentry->d_inode)
1675  		return;
1676  
1677  	shrink_dcache_parent(dentry);
1678  	for (;;) {
1679  		struct dentry *victim = NULL;
1680  		d_walk(dentry, &victim, find_submount);
1681  		if (!victim) {
1682  			if (had_submounts)
1683  				shrink_dcache_parent(dentry);
1684  			return;
1685  		}
1686  		had_submounts = true;
1687  		detach_mounts(victim);
1688  		dput(victim);
1689  	}
1690  }
1691  EXPORT_SYMBOL(d_invalidate);
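
/*
 * Example (illustrative): the VFS typically calls this when a lookup
 * discovers a stale cached entry; a filesystem can do the same from
 * its own event handling:
 *
 *	if (server_says_unlinked(dentry))	// hypothetical predicate
 *		d_invalidate(dentry);
 */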
1692  
1693  /**
1694   * __d_alloc	-	allocate a dcache entry
1695   * @sb: filesystem it will belong to
1696   * @name: qstr of the name
1697   *
1698   * Allocates a dentry. It returns %NULL if there is insufficient memory
1699   * available. On success the dentry is returned. The name passed in is
1700   * copied; the caller's copy may be reused after this call.
1701   */
1702  
1703  static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1704  {
1705  	struct dentry *dentry;
1706  	char *dname;
1707  	int err;
1708  
1709  	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1710  	if (!dentry)
1711  		return NULL;
1712  
1713  	/*
1714  	 * We guarantee that the inline name is always NUL-terminated.
1715  	 * This way the memcpy() done by the name switching in rename
1716  	 * will still always have a NUL at the end, even if we might
1717  	 * be overwriting an internal NUL character
1718  	 */
1719  	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1720  	if (unlikely(!name)) {
1721  		name = &slash_name;
1722  		dname = dentry->d_iname;
1723  	} else if (name->len > DNAME_INLINE_LEN-1) {
1724  		size_t size = offsetof(struct external_name, name[1]);
1725  		struct external_name *p = kmalloc(size + name->len,
1726  						  GFP_KERNEL_ACCOUNT |
1727  						  __GFP_RECLAIMABLE);
1728  		if (!p) {
1729  			kmem_cache_free(dentry_cache, dentry);
1730  			return NULL;
1731  		}
1732  		atomic_set(&p->u.count, 1);
1733  		dname = p->name;
1734  	} else  {
1735  		dname = dentry->d_iname;
1736  	}
1737  
1738  	dentry->d_name.len = name->len;
1739  	dentry->d_name.hash = name->hash;
1740  	memcpy(dname, name->name, name->len);
1741  	dname[name->len] = 0;
1742  
1743  	/* Make sure we always see the terminating NUL character */
1744  	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1745  
1746  	dentry->d_lockref.count = 1;
1747  	dentry->d_flags = 0;
1748  	spin_lock_init(&dentry->d_lock);
1749  	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
1750  	dentry->d_inode = NULL;
1751  	dentry->d_parent = dentry;
1752  	dentry->d_sb = sb;
1753  	dentry->d_op = NULL;
1754  	dentry->d_fsdata = NULL;
1755  	INIT_HLIST_BL_NODE(&dentry->d_hash);
1756  	INIT_LIST_HEAD(&dentry->d_lru);
1757  	INIT_LIST_HEAD(&dentry->d_subdirs);
1758  	INIT_HLIST_NODE(&dentry->d_u.d_alias);
1759  	INIT_LIST_HEAD(&dentry->d_child);
1760  	d_set_d_op(dentry, dentry->d_sb->s_d_op);
1761  
1762  	if (dentry->d_op && dentry->d_op->d_init) {
1763  		err = dentry->d_op->d_init(dentry);
1764  		if (err) {
1765  			if (dname_external(dentry))
1766  				kfree(external_name(dentry));
1767  			kmem_cache_free(dentry_cache, dentry);
1768  			return NULL;
1769  		}
1770  	}
1771  
1772  	this_cpu_inc(nr_dentry);
1773  
1774  	return dentry;
1775  }
1776  
1777  /**
1778   * d_alloc	-	allocate a dcache entry
1779   * @parent: parent of entry to allocate
1780   * @name: qstr of the name
1781   *
1782   * Allocates a dentry. It returns %NULL if there is insufficient memory
1783   * available. On success the dentry is returned. The name passed in is
1784   * copied; the caller's copy may be reused after this call.
1785   */
1786  struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1787  {
1788  	struct dentry *dentry = __d_alloc(parent->d_sb, name);
1789  	if (!dentry)
1790  		return NULL;
1791  	spin_lock(&parent->d_lock);
1792  	/*
1793  	 * don't need child lock because it is not subject
1794  	 * to concurrency here
1795  	 */
1796  	__dget_dlock(parent);
1797  	dentry->d_parent = parent;
1798  	list_add(&dentry->d_child, &parent->d_subdirs);
1799  	spin_unlock(&parent->d_lock);
1800  
1801  	return dentry;
1802  }
1803  EXPORT_SYMBOL(d_alloc);
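
/*
 * Usage sketch (illustrative only): a caller that builds the qstr by hand
 * must fill in the hash before calling d_alloc(), since d_alloc() copies
 * name->hash as-is:
 *
 *	struct qstr q = QSTR_INIT("node", 4);
 *
 *	q.hash = full_name_hash(parent, q.name, q.len);
 *	dentry = d_alloc(parent, &q);
 *	if (!dentry)
 *		return -ENOMEM;
 */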
1804  
1805  struct dentry *d_alloc_anon(struct super_block *sb)
1806  {
1807  	return __d_alloc(sb, NULL);
1808  }
1809  EXPORT_SYMBOL(d_alloc_anon);
1810  
1811  struct dentry *d_alloc_cursor(struct dentry * parent)
1812  {
1813  	struct dentry *dentry = d_alloc_anon(parent->d_sb);
1814  	if (dentry) {
1815  		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
1816  		dentry->d_parent = dget(parent);
1817  	}
1818  	return dentry;
1819  }
1820  
1821  /**
1822   * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1823   * @sb: the superblock
1824   * @name: qstr of the name
1825   *
1826   * For a filesystem that just pins its dentries in memory and never
1827   * performs lookups at all, return an unhashed IS_ROOT dentry.
1828   * This is used for pipes, sockets, et al. - the stuff that should
1829   * never be anyone's children or parents.  Unlike all other
1830   * dentries, these will not have RCU delay between dropping the
1831   * last reference and freeing them.
1832   *
1833   * The only user is alloc_file_pseudo() and that's what should
1834   * be considered a public interface.  Don't use directly.
1835   */
1836  struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1837  {
1838  	struct dentry *dentry = __d_alloc(sb, name);
1839  	if (likely(dentry))
1840  		dentry->d_flags |= DCACHE_NORCU;
1841  	return dentry;
1842  }
1843  
1844  struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1845  {
1846  	struct qstr q;
1847  
1848  	q.name = name;
1849  	q.hash_len = hashlen_string(parent, name);
1850  	return d_alloc(parent, &q);
1851  }
1852  EXPORT_SYMBOL(d_alloc_name);
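
/*
 * Usage sketch (illustrative only): pseudo filesystems that build their
 * trees by hand typically pair d_alloc_name() with d_add():
 *
 *	struct dentry *dentry = d_alloc_name(parent, "status");
 *
 *	if (!dentry)
 *		return -ENOMEM;
 *	d_add(dentry, inode);	// hashes the dentry and attaches the inode
 */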
1853  
1854  void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1855  {
1856  	WARN_ON_ONCE(dentry->d_op);
1857  	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
1858  				DCACHE_OP_COMPARE	|
1859  				DCACHE_OP_REVALIDATE	|
1860  				DCACHE_OP_WEAK_REVALIDATE	|
1861  				DCACHE_OP_DELETE	|
1862  				DCACHE_OP_REAL));
1863  	dentry->d_op = op;
1864  	if (!op)
1865  		return;
1866  	if (op->d_hash)
1867  		dentry->d_flags |= DCACHE_OP_HASH;
1868  	if (op->d_compare)
1869  		dentry->d_flags |= DCACHE_OP_COMPARE;
1870  	if (op->d_revalidate)
1871  		dentry->d_flags |= DCACHE_OP_REVALIDATE;
1872  	if (op->d_weak_revalidate)
1873  		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1874  	if (op->d_delete)
1875  		dentry->d_flags |= DCACHE_OP_DELETE;
1876  	if (op->d_prune)
1877  		dentry->d_flags |= DCACHE_OP_PRUNE;
1878  	if (op->d_real)
1879  		dentry->d_flags |= DCACHE_OP_REAL;
1880  
1881  }
1882  EXPORT_SYMBOL(d_set_d_op);
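
/*
 * Usage sketch (illustrative only; the example_* names are placeholders):
 * most filesystems never call d_set_d_op() directly - they set ->s_d_op
 * once at mount time and let __d_alloc() apply it to every new dentry:
 *
 *	static const struct dentry_operations example_dentry_ops = {
 *		.d_revalidate	= example_d_revalidate,
 *		.d_delete	= always_delete_dentry,
 *	};
 *
 *	sb->s_d_op = &example_dentry_ops;	// in ->fill_super()
 */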
1883  
1884  
1885  /*
1886   * d_set_fallthru - Mark a dentry as falling through to a lower layer
1887   * @dentry - The dentry to mark
1888   *
1889   * Mark a dentry as falling through to the lower layer of a union /
1890   * overlay.  This flag may be recorded on the medium.
1891   */
1892  void d_set_fallthru(struct dentry *dentry)
1893  {
1894  	spin_lock(&dentry->d_lock);
1895  	dentry->d_flags |= DCACHE_FALLTHRU;
1896  	spin_unlock(&dentry->d_lock);
1897  }
1898  EXPORT_SYMBOL(d_set_fallthru);
1899  
1900  static unsigned d_flags_for_inode(struct inode *inode)
1901  {
1902  	unsigned add_flags = DCACHE_REGULAR_TYPE;
1903  
1904  	if (!inode)
1905  		return DCACHE_MISS_TYPE;
1906  
1907  	if (S_ISDIR(inode->i_mode)) {
1908  		add_flags = DCACHE_DIRECTORY_TYPE;
1909  		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1910  			if (unlikely(!inode->i_op->lookup))
1911  				add_flags = DCACHE_AUTODIR_TYPE;
1912  			else
1913  				inode->i_opflags |= IOP_LOOKUP;
1914  		}
1915  		goto type_determined;
1916  	}
1917  
1918  	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1919  		if (unlikely(inode->i_op->get_link)) {
1920  			add_flags = DCACHE_SYMLINK_TYPE;
1921  			goto type_determined;
1922  		}
1923  		inode->i_opflags |= IOP_NOFOLLOW;
1924  	}
1925  
1926  	if (unlikely(!S_ISREG(inode->i_mode)))
1927  		add_flags = DCACHE_SPECIAL_TYPE;
1928  
1929  type_determined:
1930  	if (unlikely(IS_AUTOMOUNT(inode)))
1931  		add_flags |= DCACHE_NEED_AUTOMOUNT;
1932  	return add_flags;
1933  }
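
/*
 * For reference, the mapping implemented above, with DCACHE_NEED_AUTOMOUNT
 * OR'ed in on top for automount points:
 *
 *	no inode			-> DCACHE_MISS_TYPE
 *	directory with ->lookup		-> DCACHE_DIRECTORY_TYPE
 *	directory without ->lookup	-> DCACHE_AUTODIR_TYPE
 *	non-directory with ->get_link	-> DCACHE_SYMLINK_TYPE
 *	other non-regular inode		-> DCACHE_SPECIAL_TYPE
 *	regular file			-> DCACHE_REGULAR_TYPE
 */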
1934  
1935  static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1936  {
1937  	unsigned add_flags = d_flags_for_inode(inode);
1938  	WARN_ON(d_in_lookup(dentry));
1939  
1940  	spin_lock(&dentry->d_lock);
1941  	/*
1942  	 * Decrement negative dentry count if it was in the LRU list.
1943  	 */
1944  	if (dentry->d_flags & DCACHE_LRU_LIST)
1945  		this_cpu_dec(nr_dentry_negative);
1946  	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1947  	raw_write_seqcount_begin(&dentry->d_seq);
1948  	__d_set_inode_and_type(dentry, inode, add_flags);
1949  	raw_write_seqcount_end(&dentry->d_seq);
1950  	fsnotify_update_flags(dentry);
1951  	spin_unlock(&dentry->d_lock);
1952  }
1953  
1954  /**
1955   * d_instantiate - fill in inode information for a dentry
1956   * @entry: dentry to complete
1957   * @inode: inode to attach to this dentry
1958   *
1959   * Fill in inode information in the entry.
1960   *
1961   * This turns negative dentries into productive full members
1962   * of society.
1963   *
1964   * NOTE! This assumes that the inode count has been incremented
1965   * (or otherwise set) by the caller to indicate that it is now
1966   * in use by the dcache.
1967   */
1968  
1969  void d_instantiate(struct dentry *entry, struct inode * inode)
1970  {
1971  	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1972  	if (inode) {
1973  		security_d_instantiate(entry, inode);
1974  		spin_lock(&inode->i_lock);
1975  		__d_instantiate(entry, inode);
1976  		spin_unlock(&inode->i_lock);
1977  	}
1978  }
1979  EXPORT_SYMBOL(d_instantiate);
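
/*
 * Usage sketch (illustrative only; example_new_inode() is a placeholder):
 * a typical ->create() method allocates the inode and then binds it to
 * the dentry the VFS passed in:
 *
 *	static int example_create(struct inode *dir, struct dentry *dentry,
 *				  umode_t mode, bool excl)
 *	{
 *		struct inode *inode = example_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);	// dcache now owns the inode ref
 *		return 0;
 *	}
 */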
1980  
1981  /*
1982   * This should be equivalent to d_instantiate() + unlock_new_inode(),
1983   * with lockdep-related part of unlock_new_inode() done before
1984   * anything else.  Use that instead of open-coding d_instantiate()/
1985   * unlock_new_inode() combinations.
1986   */
1987  void d_instantiate_new(struct dentry *entry, struct inode *inode)
1988  {
1989  	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1990  	BUG_ON(!inode);
1991  	lockdep_annotate_inode_mutex_key(inode);
1992  	security_d_instantiate(entry, inode);
1993  	spin_lock(&inode->i_lock);
1994  	__d_instantiate(entry, inode);
1995  	WARN_ON(!(inode->i_state & I_NEW));
1996  	inode->i_state &= ~I_NEW & ~I_CREATING;
1997  	smp_mb();
1998  	wake_up_bit(&inode->i_state, __I_NEW);
1999  	spin_unlock(&inode->i_lock);
2000  }
2001  EXPORT_SYMBOL(d_instantiate_new);
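
/*
 * Usage sketch (illustrative only): for inodes created with I_NEW set,
 * e.g. via new_inode() + insert_inode_locked(), the creation path ends
 * with a single call instead of d_instantiate() + unlock_new_inode():
 *
 *	err = insert_inode_locked(inode);
 *	if (err)
 *		goto out_fail;
 *	...
 *	d_instantiate_new(dentry, inode);
 *	return 0;
 */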
2002  
2003  struct dentry *d_make_root(struct inode *root_inode)
2004  {
2005  	struct dentry *res = NULL;
2006  
2007  	if (root_inode) {
2008  		res = d_alloc_anon(root_inode->i_sb);
2009  		if (res)
2010  			d_instantiate(res, root_inode);
2011  		else
2012  			iput(root_inode);
2013  	}
2014  	return res;
2015  }
2016  EXPORT_SYMBOL(d_make_root);
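
/*
 * Usage sketch (illustrative only; example_make_root_inode() is a
 * placeholder): the usual tail of ->fill_super().  Note that on failure
 * d_make_root() has already dropped the inode reference, so no iput()
 * is needed:
 *
 *	sb->s_root = d_make_root(example_make_root_inode(sb));
 *	if (!sb->s_root)
 *		return -ENOMEM;
 *	return 0;
 */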
2017  
2018  static struct dentry *__d_instantiate_anon(struct dentry *dentry,
2019  					   struct inode *inode,
2020  					   bool disconnected)
2021  {
2022  	struct dentry *res;
2023  	unsigned add_flags;
2024  
2025  	security_d_instantiate(dentry, inode);
2026  	spin_lock(&inode->i_lock);
2027  	res = __d_find_any_alias(inode);
2028  	if (res) {
2029  		spin_unlock(&inode->i_lock);
2030  		dput(dentry);
2031  		goto out_iput;
2032  	}
2033  
2034  	/* attach a disconnected dentry */
2035  	add_flags = d_flags_for_inode(inode);
2036  
2037  	if (disconnected)
2038  		add_flags |= DCACHE_DISCONNECTED;
2039  
2040  	spin_lock(&dentry->d_lock);
2041  	__d_set_inode_and_type(dentry, inode, add_flags);
2042  	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2043  	if (!disconnected) {
2044  		hlist_bl_lock(&dentry->d_sb->s_roots);
2045  		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
2046  		hlist_bl_unlock(&dentry->d_sb->s_roots);
2047  	}
2048  	spin_unlock(&dentry->d_lock);
2049  	spin_unlock(&inode->i_lock);
2050  
2051  	return dentry;
2052  
2053   out_iput:
2054  	iput(inode);
2055  	return res;
2056  }
2057  
2058  struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
2059  {
2060  	return __d_instantiate_anon(dentry, inode, true);
2061  }
2062  EXPORT_SYMBOL(d_instantiate_anon);
2063  
2064  static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
2065  {
2066  	struct dentry *tmp;
2067  	struct dentry *res;
2068  
2069  	if (!inode)
2070  		return ERR_PTR(-ESTALE);
2071  	if (IS_ERR(inode))
2072  		return ERR_CAST(inode);
2073  
2074  	res = d_find_any_alias(inode);
2075  	if (res)
2076  		goto out_iput;
2077  
2078  	tmp = d_alloc_anon(inode->i_sb);
2079  	if (!tmp) {
2080  		res = ERR_PTR(-ENOMEM);
2081  		goto out_iput;
2082  	}
2083  
2084  	return __d_instantiate_anon(tmp, inode, disconnected);
2085  
2086  out_iput:
2087  	iput(inode);
2088  	return res;
2089  }
2090  
2091  /**
2092   * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2093   * @inode: inode to allocate the dentry for
2094   *
2095   * Obtain a dentry for an inode resulting from NFS filehandle conversion or
2096   * similar open by handle operations.  The returned dentry may be anonymous,
2097   * or may have a full name (if the inode was already in the cache).
2098   *
2099   * When called on a directory inode, we must ensure that the inode only ever
2100   * has one dentry.  If a dentry is found, that is returned instead of
2101   * allocating a new one.
2102   *
2103   * On successful return, the reference to the inode has been transferred
2104   * to the dentry.  In case of an error the reference on the inode is released.
2105   * To make it easier to use in export operations a %NULL or IS_ERR inode may
2106   * be passed in and the error will be propagated to the return value,
2107   * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2108   */
2109  struct dentry *d_obtain_alias(struct inode *inode)
2110  {
2111  	return __d_obtain_alias(inode, true);
2112  }
2113  EXPORT_SYMBOL(d_obtain_alias);
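
/*
 * Usage sketch (illustrative only; example_iget() is a placeholder): the
 * classic caller is an export_operations ->fh_to_dentry() method, relying
 * on the %NULL/IS_ERR pass-through described above:
 *
 *	static struct dentry *example_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = example_iget(sb, fid->i32.ino);
 *
 *		return d_obtain_alias(inode);
 *	}
 */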
2114  
2115  /**
2116   * d_obtain_root - find or allocate a dentry for a given inode
2117   * @inode: inode to allocate the dentry for
2118   *
2119   * Obtain an IS_ROOT dentry for the root of a filesystem.
2120   *
2121   * We must ensure that directory inodes only ever have one dentry.  If a
2122   * dentry is found, that is returned instead of allocating a new one.
2123   *
2124   * On successful return, the reference to the inode has been transferred
2125   * to the dentry.  In case of an error the reference on the inode is
2126   * released.  A %NULL or IS_ERR inode may be passed in and the
2127   * error will be propagated to the return value, with a %NULL @inode
2128   * replaced by ERR_PTR(-ESTALE).
2129   */
2130  struct dentry *d_obtain_root(struct inode *inode)
2131  {
2132  	return __d_obtain_alias(inode, false);
2133  }
2134  EXPORT_SYMBOL(d_obtain_root);
2135  
2136  /**
2137   * d_add_ci - lookup or allocate new dentry with case-exact name
2138   * @inode:  the inode case-insensitive lookup has found
2139   * @dentry: the negative dentry that was passed to the parent's lookup func
2140   * @name:   the case-exact name to be associated with the returned dentry
2141   *
2142   * This is to avoid filling the dcache with case-insensitive names to the
2143   * same inode, only the actual correct case is stored in the dcache for
2144   * case-insensitive filesystems.
2145   *
2146   * For a case-insensitive lookup match, if the case-exact dentry
2147   * already exists in the dcache, use it and return it.
2148   *
2149   * If no entry exists with the exact case name, allocate new dentry with
2150   * the exact case, and return the spliced entry.
2151   */
2152  struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2153  			struct qstr *name)
2154  {
2155  	struct dentry *found, *res;
2156  
2157  	/*
2158  	 * First check if a dentry matching the name already exists,
2159  	 * if not go ahead and create it now.
2160  	 */
2161  	found = d_hash_and_lookup(dentry->d_parent, name);
2162  	if (found) {
2163  		iput(inode);
2164  		return found;
2165  	}
2166  	if (d_in_lookup(dentry)) {
2167  		found = d_alloc_parallel(dentry->d_parent, name,
2168  					dentry->d_wait);
2169  		if (IS_ERR(found) || !d_in_lookup(found)) {
2170  			iput(inode);
2171  			return found;
2172  		}
2173  	} else {
2174  		found = d_alloc(dentry->d_parent, name);
2175  		if (!found) {
2176  			iput(inode);
2177  			return ERR_PTR(-ENOMEM);
2178  		}
2179  	}
2180  	res = d_splice_alias(inode, found);
2181  	if (res) {
2182  		dput(found);
2183  		return res;
2184  	}
2185  	return found;
2186  }
2187  EXPORT_SYMBOL(d_add_ci);
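
/*
 * Usage sketch (illustrative only): once a case-insensitive ->lookup()
 * has found the inode and the on-disk (case-exact) spelling of the name:
 *
 *	struct qstr ci_name = QSTR_INIT(disk_name, disk_name_len);
 *
 *	return d_add_ci(dentry, inode, &ci_name);
 *
 * The inode reference is consumed on all paths, including errors.
 */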
2188  
2189  
2190  static inline bool d_same_name(const struct dentry *dentry,
2191  				const struct dentry *parent,
2192  				const struct qstr *name)
2193  {
2194  	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2195  		if (dentry->d_name.len != name->len)
2196  			return false;
2197  		return dentry_cmp(dentry, name->name, name->len) == 0;
2198  	}
2199  	return parent->d_op->d_compare(dentry,
2200  				       dentry->d_name.len, dentry->d_name.name,
2201  				       name) == 0;
2202  }
2203  
2204  /**
2205   * __d_lookup_rcu - search for a dentry (racy, store-free)
2206   * @parent: parent dentry
2207   * @name: qstr of name we wish to find
2208   * @seqp: returns d_seq value at the point where the dentry was found
2209   * Returns: dentry, or NULL
2210   *
2211   * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2212   * resolution (store-free path walking) design described in
2213   * Documentation/filesystems/path-lookup.txt.
2214   *
2215   * This is not to be used outside core vfs.
2216   *
2217   * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
2218   * held, and rcu_read_lock held. The returned dentry must not be stored
2219   * anywhere without taking d_lock and checking the d_seq sequence count
2220   * against the value returned here in @seqp.
2221   *
2222   * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2223   * function.
2224   *
2225   * Alternatively, __d_lookup_rcu may be called again to look up the child of
2226   * the returned dentry, so long as its parent's seqlock is checked after the
2227   * child is looked up. Thus, an interlocking stepping of sequence lock checks
2228   * is formed, giving integrity down the path walk.
2229   *
2230   * NOTE! The caller *has* to check the resulting dentry against the sequence
2231   * number we've returned before using any of the resulting dentry state!
2232   */
2233  struct dentry *__d_lookup_rcu(const struct dentry *parent,
2234  				const struct qstr *name,
2235  				unsigned *seqp)
2236  {
2237  	u64 hashlen = name->hash_len;
2238  	const unsigned char *str = name->name;
2239  	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2240  	struct hlist_bl_node *node;
2241  	struct dentry *dentry;
2242  
2243  	/*
2244  	 * Note: There is significant duplication with __d_lookup which is
2245  	 * required to prevent single threaded performance regressions,
2246  	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2247  	 * Keep the two functions in sync.
2248  	 */
2249  
2250  	/*
2251  	 * The hash list is protected using RCU.
2252  	 *
2253  	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2254  	 * races with d_move().
2255  	 *
2256  	 * It is possible that concurrent renames can mess up our list
2257  	 * walk here and result in missing our dentry, resulting in the
2258  	 * false-negative result. d_lookup() protects against concurrent
2259  	 * renames using rename_lock seqlock.
2260  	 *
2261  	 * See Documentation/filesystems/path-lookup.txt for more details.
2262  	 */
2263  	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2264  		unsigned seq;
2265  
2266  seqretry:
2267  		/*
2268  		 * The dentry sequence count protects us from concurrent
2269  		 * renames, and thus protects parent and name fields.
2270  		 *
2271  		 * The caller must perform a seqcount check in order
2272  		 * to do anything useful with the returned dentry.
2273  		 *
2274  		 * NOTE! We do a "raw" seqcount_begin here. That means that
2275  		 * we don't wait for the sequence count to stabilize if it
2276  		 * is in the middle of a sequence change. If we do the slow
2277  		 * dentry compare, we will do seqretries until it is stable,
2278  		 * and if we end up with a successful lookup, we actually
2279  		 * want to exit RCU lookup anyway.
2280  		 *
2281  		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2282  		 * we are still guaranteed NUL-termination of ->d_name.name.
2283  		 */
2284  		seq = raw_seqcount_begin(&dentry->d_seq);
2285  		if (dentry->d_parent != parent)
2286  			continue;
2287  		if (d_unhashed(dentry))
2288  			continue;
2289  
2290  		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2291  			int tlen;
2292  			const char *tname;
2293  			if (dentry->d_name.hash != hashlen_hash(hashlen))
2294  				continue;
2295  			tlen = dentry->d_name.len;
2296  			tname = dentry->d_name.name;
2297  			/* we want a consistent (name,len) pair */
2298  			if (read_seqcount_retry(&dentry->d_seq, seq)) {
2299  				cpu_relax();
2300  				goto seqretry;
2301  			}
2302  			if (parent->d_op->d_compare(dentry,
2303  						    tlen, tname, name) != 0)
2304  				continue;
2305  		} else {
2306  			if (dentry->d_name.hash_len != hashlen)
2307  				continue;
2308  			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2309  				continue;
2310  		}
2311  		*seqp = seq;
2312  		return dentry;
2313  	}
2314  	return NULL;
2315  }
2316  
2317  /**
2318   * d_lookup - search for a dentry
2319   * @parent: parent dentry
2320   * @name: qstr of name we wish to find
2321   * Returns: dentry, or NULL
2322   *
2323   * d_lookup searches the children of the parent dentry for the name in
2324   * question. If the dentry is found its reference count is incremented and the
2325   * dentry is returned. The caller must use dput to free the entry when it has
2326   * finished using it. %NULL is returned if the dentry does not exist.
2327   */
2328  struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2329  {
2330  	struct dentry *dentry;
2331  	unsigned seq;
2332  
2333  	do {
2334  		seq = read_seqbegin(&rename_lock);
2335  		dentry = __d_lookup(parent, name);
2336  		if (dentry)
2337  			break;
2338  	} while (read_seqretry(&rename_lock, seq));
2339  	return dentry;
2340  }
2341  EXPORT_SYMBOL(d_lookup);
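
/*
 * Usage sketch (illustrative only): the returned dentry is referenced, so
 * every successful d_lookup() must be balanced by dput():
 *
 *	struct dentry *child = d_lookup(parent, &name);	// name.hash valid
 *
 *	if (child) {
 *		...
 *		dput(child);
 *	}
 */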
2342  
2343  /**
2344   * __d_lookup - search for a dentry (racy)
2345   * @parent: parent dentry
2346   * @name: qstr of name we wish to find
2347   * Returns: dentry, or NULL
2348   *
2349   * __d_lookup is like d_lookup, however it may (rarely) return a
2350   * false-negative result due to unrelated rename activity.
2351   *
2352   * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2353   * however it must be used carefully, eg. with a following d_lookup in
2354   * the case of failure.
2355   *
2356   * __d_lookup callers must be commented.
2357   */
2358  struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2359  {
2360  	unsigned int hash = name->hash;
2361  	struct hlist_bl_head *b = d_hash(hash);
2362  	struct hlist_bl_node *node;
2363  	struct dentry *found = NULL;
2364  	struct dentry *dentry;
2365  
2366  	/*
2367  	 * Note: There is significant duplication with __d_lookup_rcu which is
2368  	 * required to prevent single threaded performance regressions,
2369  	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2370  	 * Keep the two functions in sync.
2371  	 */
2372  
2373  	/*
2374  	 * The hash list is protected using RCU.
2375  	 *
2376  	 * Take d_lock when comparing a candidate dentry, to avoid races
2377  	 * with d_move().
2378  	 *
2379  	 * It is possible that concurrent renames can mess up our list
2380  	 * walk here and result in missing our dentry, resulting in the
2381  	 * false-negative result. d_lookup() protects against concurrent
2382  	 * renames using rename_lock seqlock.
2383  	 *
2384  	 * See Documentation/filesystems/path-lookup.txt for more details.
2385  	 */
2386  	rcu_read_lock();
2387  
2388  	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2389  
2390  		if (dentry->d_name.hash != hash)
2391  			continue;
2392  
2393  		spin_lock(&dentry->d_lock);
2394  		if (dentry->d_parent != parent)
2395  			goto next;
2396  		if (d_unhashed(dentry))
2397  			goto next;
2398  
2399  		if (!d_same_name(dentry, parent, name))
2400  			goto next;
2401  
2402  		dentry->d_lockref.count++;
2403  		found = dentry;
2404  		spin_unlock(&dentry->d_lock);
2405  		break;
2406  next:
2407  		spin_unlock(&dentry->d_lock);
2408  	}
2409  	rcu_read_unlock();
2410  
2411  	return found;
2412  }
2413  
2414  /**
2415   * d_hash_and_lookup - hash the qstr then search for a dentry
2416   * @dir: Directory to search in
2417   * @name: qstr of name we wish to find
2418   *
2419   * On lookup failure NULL is returned; on a bad name ERR_PTR(-error) is returned.
2420   */
2421  struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2422  {
2423  	/*
2424  	 * Check for a fs-specific hash function. Note that we must
2425  	 * calculate the standard hash first, as the d_op->d_hash()
2426  	 * routine may choose to leave the hash value unchanged.
2427  	 */
2428  	name->hash = full_name_hash(dir, name->name, name->len);
2429  	if (dir->d_flags & DCACHE_OP_HASH) {
2430  		int err = dir->d_op->d_hash(dir, name);
2431  		if (unlikely(err < 0))
2432  			return ERR_PTR(err);
2433  	}
2434  	return d_lookup(dir, name);
2435  }
2436  EXPORT_SYMBOL(d_hash_and_lookup);
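
/*
 * Usage sketch (illustrative only): unlike d_lookup(), this computes the
 * hash itself (honouring ->d_hash), so callers only set name and len:
 *
 *	struct qstr q = QSTR_INIT("victim", 6);
 *	struct dentry *child = d_hash_and_lookup(dir, &q);
 *
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	if (child)
 *		dput(child);
 */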
2437  
2438  /*
2439   * When a file is deleted, we have two options:
2440   * - turn this dentry into a negative dentry
2441   * - unhash this dentry and free it.
2442   *
2443   * Usually, we want to just turn this into
2444   * a negative dentry, but if anybody else is
2445   * currently using the dentry or the inode,
2446   * we can't do that; we fall back on removing
2447   * it from the hash queues and waiting for
2448   * it to be deleted later, when it has no users.
2449   */
2450  
2451  /**
2452   * d_delete - delete a dentry
2453   * @dentry: The dentry to delete
2454   *
2455   * Turn the dentry into a negative dentry if possible, otherwise
2456   * remove it from the hash queues so it can be deleted later
2457   */
2458  
2459  void d_delete(struct dentry * dentry)
2460  {
2461  	struct inode *inode = dentry->d_inode;
2462  
2463  	spin_lock(&inode->i_lock);
2464  	spin_lock(&dentry->d_lock);
2465  	/*
2466  	 * Are we the only user?
2467  	 */
2468  	if (dentry->d_lockref.count == 1) {
2469  		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2470  		dentry_unlink_inode(dentry);
2471  	} else {
2472  		__d_drop(dentry);
2473  		spin_unlock(&dentry->d_lock);
2474  		spin_unlock(&inode->i_lock);
2475  	}
2476  }
2477  EXPORT_SYMBOL(d_delete);
2478  
2479  static void __d_rehash(struct dentry *entry)
2480  {
2481  	struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2482  
2483  	hlist_bl_lock(b);
2484  	hlist_bl_add_head_rcu(&entry->d_hash, b);
2485  	hlist_bl_unlock(b);
2486  }
2487  
2488  /**
2489   * d_rehash	- add an entry back to the hash
2490   * @entry: dentry to add to the hash
2491   *
2492   * Adds a dentry to the hash according to its name.
2493   */
2494  
2495  void d_rehash(struct dentry * entry)
2496  {
2497  	spin_lock(&entry->d_lock);
2498  	__d_rehash(entry);
2499  	spin_unlock(&entry->d_lock);
2500  }
2501  EXPORT_SYMBOL(d_rehash);
2502  
2503  static inline unsigned start_dir_add(struct inode *dir)
2504  {
2505  
2506  	for (;;) {
2507  		unsigned n = dir->i_dir_seq;
2508  		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2509  			return n;
2510  		cpu_relax();
2511  	}
2512  }
2513  
2514  static inline void end_dir_add(struct inode *dir, unsigned n)
2515  {
2516  	smp_store_release(&dir->i_dir_seq, n + 2);
2517  }
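
/*
 * Together, start_dir_add()/end_dir_add() form a seqcount-style protocol
 * on ->i_dir_seq: an odd value means an addition is in flight.  The
 * lockless reader side (see d_alloc_parallel()) is, in sketch form:
 *
 *	seq = smp_load_acquire(&dir->i_dir_seq);
 *	if (seq & 1)
 *		goto retry;
 *	... lookup ...
 *	if (READ_ONCE(dir->i_dir_seq) != seq)
 *		goto retry;
 */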
2518  
2519  static void d_wait_lookup(struct dentry *dentry)
2520  {
2521  	if (d_in_lookup(dentry)) {
2522  		DECLARE_WAITQUEUE(wait, current);
2523  		add_wait_queue(dentry->d_wait, &wait);
2524  		do {
2525  			set_current_state(TASK_UNINTERRUPTIBLE);
2526  			spin_unlock(&dentry->d_lock);
2527  			schedule();
2528  			spin_lock(&dentry->d_lock);
2529  		} while (d_in_lookup(dentry));
2530  	}
2531  }
2532  
2533  struct dentry *d_alloc_parallel(struct dentry *parent,
2534  				const struct qstr *name,
2535  				wait_queue_head_t *wq)
2536  {
2537  	unsigned int hash = name->hash;
2538  	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2539  	struct hlist_bl_node *node;
2540  	struct dentry *new = d_alloc(parent, name);
2541  	struct dentry *dentry;
2542  	unsigned seq, r_seq, d_seq;
2543  
2544  	if (unlikely(!new))
2545  		return ERR_PTR(-ENOMEM);
2546  
2547  retry:
2548  	rcu_read_lock();
2549  	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2550  	r_seq = read_seqbegin(&rename_lock);
2551  	dentry = __d_lookup_rcu(parent, name, &d_seq);
2552  	if (unlikely(dentry)) {
2553  		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2554  			rcu_read_unlock();
2555  			goto retry;
2556  		}
2557  		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2558  			rcu_read_unlock();
2559  			dput(dentry);
2560  			goto retry;
2561  		}
2562  		rcu_read_unlock();
2563  		dput(new);
2564  		return dentry;
2565  	}
2566  	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2567  		rcu_read_unlock();
2568  		goto retry;
2569  	}
2570  
2571  	if (unlikely(seq & 1)) {
2572  		rcu_read_unlock();
2573  		goto retry;
2574  	}
2575  
2576  	hlist_bl_lock(b);
2577  	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2578  		hlist_bl_unlock(b);
2579  		rcu_read_unlock();
2580  		goto retry;
2581  	}
2582  	/*
2583  	 * No changes for the parent since the beginning of d_lookup().
2584  	 * Since all removals from the chain happen with hlist_bl_lock(),
2585  	 * any potential in-lookup matches are going to stay here until
2586  	 * we unlock the chain.  All fields are stable in everything
2587  	 * we encounter.
2588  	 */
2589  	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2590  		if (dentry->d_name.hash != hash)
2591  			continue;
2592  		if (dentry->d_parent != parent)
2593  			continue;
2594  		if (!d_same_name(dentry, parent, name))
2595  			continue;
2596  		hlist_bl_unlock(b);
2597  		/* now we can try to grab a reference */
2598  		if (!lockref_get_not_dead(&dentry->d_lockref)) {
2599  			rcu_read_unlock();
2600  			goto retry;
2601  		}
2602  
2603  		rcu_read_unlock();
2604  		/*
2605  		 * somebody is likely to be still doing lookup for it;
2606  		 * wait for them to finish
2607  		 */
2608  		spin_lock(&dentry->d_lock);
2609  		d_wait_lookup(dentry);
2610  		/*
2611  		 * it's not in-lookup anymore; in principle we should repeat
2612  		 * everything from dcache lookup, but it's likely to be what
2613  		 * d_lookup() would've found anyway.  If it is, just return it;
2614  		 * otherwise we really have to repeat the whole thing.
2615  		 */
2616  		if (unlikely(dentry->d_name.hash != hash))
2617  			goto mismatch;
2618  		if (unlikely(dentry->d_parent != parent))
2619  			goto mismatch;
2620  		if (unlikely(d_unhashed(dentry)))
2621  			goto mismatch;
2622  		if (unlikely(!d_same_name(dentry, parent, name)))
2623  			goto mismatch;
2624  		/* OK, it *is* a hashed match; return it */
2625  		spin_unlock(&dentry->d_lock);
2626  		dput(new);
2627  		return dentry;
2628  	}
2629  	rcu_read_unlock();
2630  	/* we can't take ->d_lock here; it's OK, though. */
2631  	new->d_flags |= DCACHE_PAR_LOOKUP;
2632  	new->d_wait = wq;
2633  	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2634  	hlist_bl_unlock(b);
2635  	return new;
2636  mismatch:
2637  	spin_unlock(&dentry->d_lock);
2638  	dput(dentry);
2639  	goto retry;
2640  }
2641  EXPORT_SYMBOL(d_alloc_parallel);
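
/*
 * Usage sketch (illustrative only), modelled on the VFS slow-path lookup:
 * the caller supplies the waitqueue and must end the in-lookup state with
 * d_lookup_done() once ->lookup() has finished:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct dentry *dentry = d_alloc_parallel(dir, &name, &wq);
 *
 *	if (IS_ERR(dentry) || !d_in_lookup(dentry))
 *		return dentry;	// error, or another lookup got there first
 *	res = dir->d_inode->i_op->lookup(dir->d_inode, dentry, flags);
 *	d_lookup_done(dentry);
 */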
2642  
2643  void __d_lookup_done(struct dentry *dentry)
2644  {
2645  	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2646  						 dentry->d_name.hash);
2647  	hlist_bl_lock(b);
2648  	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2649  	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2650  	wake_up_all(dentry->d_wait);
2651  	dentry->d_wait = NULL;
2652  	hlist_bl_unlock(b);
2653  	INIT_HLIST_NODE(&dentry->d_u.d_alias);
2654  	INIT_LIST_HEAD(&dentry->d_lru);
2655  }
2656  EXPORT_SYMBOL(__d_lookup_done);
2657  
2658  /* inode->i_lock held if inode is non-NULL */
2659  
2660  static inline void __d_add(struct dentry *dentry, struct inode *inode)
2661  {
2662  	struct inode *dir = NULL;
2663  	unsigned n;
2664  	spin_lock(&dentry->d_lock);
2665  	if (unlikely(d_in_lookup(dentry))) {
2666  		dir = dentry->d_parent->d_inode;
2667  		n = start_dir_add(dir);
2668  		__d_lookup_done(dentry);
2669  	}
2670  	if (inode) {
2671  		unsigned add_flags = d_flags_for_inode(inode);
2672  		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2673  		raw_write_seqcount_begin(&dentry->d_seq);
2674  		__d_set_inode_and_type(dentry, inode, add_flags);
2675  		raw_write_seqcount_end(&dentry->d_seq);
2676  		fsnotify_update_flags(dentry);
2677  	}
2678  	__d_rehash(dentry);
2679  	if (dir)
2680  		end_dir_add(dir, n);
2681  	spin_unlock(&dentry->d_lock);
2682  	if (inode)
2683  		spin_unlock(&inode->i_lock);
2684  }
2685  
2686  /**
2687   * d_add - add dentry to hash queues
2688   * @entry: dentry to add
2689   * @inode: The inode to attach to this dentry
2690   *
2691   * This adds the entry to the hash queues and attaches @inode to @entry.
2692   * The entry itself was actually filled in earlier during d_alloc().
2693   */
2694  
2695  void d_add(struct dentry *entry, struct inode *inode)
2696  {
2697  	if (inode) {
2698  		security_d_instantiate(entry, inode);
2699  		spin_lock(&inode->i_lock);
2700  	}
2701  	__d_add(entry, inode);
2702  }
2703  EXPORT_SYMBOL(d_add);
2704  
2705  /**
2706   * d_exact_alias - find and hash an exact unhashed alias
2707   * @entry: dentry to add
2708   * @inode: The inode to go with this dentry
2709   *
2710   * If an unhashed dentry with the same name/parent and desired
2711   * inode already exists, hash and return it.  Otherwise, return
2712   * NULL.
2713   *
2714   * Parent directory should be locked.
2715   */
2716  struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2717  {
2718  	struct dentry *alias;
2719  	unsigned int hash = entry->d_name.hash;
2720  
2721  	spin_lock(&inode->i_lock);
2722  	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2723  		/*
2724  		 * Don't need alias->d_lock here, because aliases with
2725  		 * d_parent == entry->d_parent are not subject to name or
2726  		 * parent changes, because the parent inode i_mutex is held.
2727  		 */
2728  		if (alias->d_name.hash != hash)
2729  			continue;
2730  		if (alias->d_parent != entry->d_parent)
2731  			continue;
2732  		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2733  			continue;
2734  		spin_lock(&alias->d_lock);
2735  		if (!d_unhashed(alias)) {
2736  			spin_unlock(&alias->d_lock);
2737  			alias = NULL;
2738  		} else {
2739  			__dget_dlock(alias);
2740  			__d_rehash(alias);
2741  			spin_unlock(&alias->d_lock);
2742  		}
2743  		spin_unlock(&inode->i_lock);
2744  		return alias;
2745  	}
2746  	spin_unlock(&inode->i_lock);
2747  	return NULL;
2748  }
2749  EXPORT_SYMBOL(d_exact_alias);
2750  
2751  static void swap_names(struct dentry *dentry, struct dentry *target)
2752  {
2753  	if (unlikely(dname_external(target))) {
2754  		if (unlikely(dname_external(dentry))) {
2755  			/*
2756  			 * Both external: swap the pointers
2757  			 */
2758  			swap(target->d_name.name, dentry->d_name.name);
2759  		} else {
2760  			/*
2761  			 * dentry:internal, target:external.  Steal target's
2762  			 * storage and make target internal.
2763  			 */
2764  			memcpy(target->d_iname, dentry->d_name.name,
2765  					dentry->d_name.len + 1);
2766  			dentry->d_name.name = target->d_name.name;
2767  			target->d_name.name = target->d_iname;
2768  		}
2769  	} else {
2770  		if (unlikely(dname_external(dentry))) {
2771  			/*
2772  			 * dentry:external, target:internal.  Give dentry's
2773  			 * storage to target and make dentry internal
2774  			 */
2775  			memcpy(dentry->d_iname, target->d_name.name,
2776  					target->d_name.len + 1);
2777  			target->d_name.name = dentry->d_name.name;
2778  			dentry->d_name.name = dentry->d_iname;
2779  		} else {
2780  			/*
2781  			 * Both are internal.
2782  			 */
2783  			unsigned int i;
2784  			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2785  			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2786  				swap(((long *) &dentry->d_iname)[i],
2787  				     ((long *) &target->d_iname)[i]);
2788  			}
2789  		}
2790  	}
2791  	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2792  }
2793  
2794  static void copy_name(struct dentry *dentry, struct dentry *target)
2795  {
2796  	struct external_name *old_name = NULL;
2797  	if (unlikely(dname_external(dentry)))
2798  		old_name = external_name(dentry);
2799  	if (unlikely(dname_external(target))) {
2800  		atomic_inc(&external_name(target)->u.count);
2801  		dentry->d_name = target->d_name;
2802  	} else {
2803  		memcpy(dentry->d_iname, target->d_name.name,
2804  				target->d_name.len + 1);
2805  		dentry->d_name.name = dentry->d_iname;
2806  		dentry->d_name.hash_len = target->d_name.hash_len;
2807  	}
2808  	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2809  		kfree_rcu(old_name, u.head);
2810  }
2811  
2812  /*
2813   * __d_move - move a dentry
2814   * @dentry: entry to move
2815   * @target: new dentry
2816   * @exchange: exchange the two dentries
2817   *
2818   * Update the dcache to reflect the move of a file name. Negative
2819   * dcache entries should not be moved in this way. Caller must hold
2820   * rename_lock, the i_mutex of the source and target directories,
2821   * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2822   */
2823  static void __d_move(struct dentry *dentry, struct dentry *target,
2824  		     bool exchange)
2825  {
2826  	struct dentry *old_parent, *p;
2827  	struct inode *dir = NULL;
2828  	unsigned n;
2829  
2830  	WARN_ON(!dentry->d_inode);
2831  	if (WARN_ON(dentry == target))
2832  		return;
2833  
2834  	BUG_ON(d_ancestor(target, dentry));
2835  	old_parent = dentry->d_parent;
2836  	p = d_ancestor(old_parent, target);
2837  	if (IS_ROOT(dentry)) {
2838  		BUG_ON(p);
2839  		spin_lock(&target->d_parent->d_lock);
2840  	} else if (!p) {
2841  		/* target is not a descendant of dentry->d_parent */
2842  		spin_lock(&target->d_parent->d_lock);
2843  		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2844  	} else {
2845  		BUG_ON(p == dentry);
2846  		spin_lock(&old_parent->d_lock);
2847  		if (p != target)
2848  			spin_lock_nested(&target->d_parent->d_lock,
2849  					DENTRY_D_LOCK_NESTED);
2850  	}
2851  	spin_lock_nested(&dentry->d_lock, 2);
2852  	spin_lock_nested(&target->d_lock, 3);
2853  
2854  	if (unlikely(d_in_lookup(target))) {
2855  		dir = target->d_parent->d_inode;
2856  		n = start_dir_add(dir);
2857  		__d_lookup_done(target);
2858  	}
2859  
2860  	write_seqcount_begin(&dentry->d_seq);
2861  	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2862  
2863  	/* unhash both */
2864  	if (!d_unhashed(dentry))
2865  		___d_drop(dentry);
2866  	if (!d_unhashed(target))
2867  		___d_drop(target);
2868  
2869  	/* ... and switch them in the tree */
2870  	dentry->d_parent = target->d_parent;
2871  	if (!exchange) {
2872  		copy_name(dentry, target);
2873  		target->d_hash.pprev = NULL;
2874  		dentry->d_parent->d_lockref.count++;
2875  		if (dentry != old_parent) /* wasn't IS_ROOT */
2876  			WARN_ON(!--old_parent->d_lockref.count);
2877  	} else {
2878  		target->d_parent = old_parent;
2879  		swap_names(dentry, target);
2880  		list_move(&target->d_child, &target->d_parent->d_subdirs);
2881  		__d_rehash(target);
2882  		fsnotify_update_flags(target);
2883  	}
2884  	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2885  	__d_rehash(dentry);
2886  	fsnotify_update_flags(dentry);
2887  	fscrypt_handle_d_move(dentry);
2888  
2889  	write_seqcount_end(&target->d_seq);
2890  	write_seqcount_end(&dentry->d_seq);
2891  
2892  	if (dir)
2893  		end_dir_add(dir, n);
2894  
2895  	if (dentry->d_parent != old_parent)
2896  		spin_unlock(&dentry->d_parent->d_lock);
2897  	if (dentry != old_parent)
2898  		spin_unlock(&old_parent->d_lock);
2899  	spin_unlock(&target->d_lock);
2900  	spin_unlock(&dentry->d_lock);
2901  }
2902  
2903  /*
2904   * d_move - move a dentry
2905   * @dentry: entry to move
2906   * @target: new dentry
2907   *
2908   * Update the dcache to reflect the move of a file name. Negative
2909   * dcache entries should not be moved in this way. See the locking
2910   * requirements for __d_move.
2911   */
2912  void d_move(struct dentry *dentry, struct dentry *target)
2913  {
2914  	write_seqlock(&rename_lock);
2915  	__d_move(dentry, target, false);
2916  	write_sequnlock(&rename_lock);
2917  }
2918  EXPORT_SYMBOL(d_move);
2919  
2920  /*
2921   * d_exchange - exchange two dentries
2922   * @dentry1: first dentry
2923   * @dentry2: second dentry
2924   */
2925  void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2926  {
2927  	write_seqlock(&rename_lock);
2928  
2929  	WARN_ON(!dentry1->d_inode);
2930  	WARN_ON(!dentry2->d_inode);
2931  	WARN_ON(IS_ROOT(dentry1));
2932  	WARN_ON(IS_ROOT(dentry2));
2933  
2934  	__d_move(dentry1, dentry2, true);
2935  
2936  	write_sequnlock(&rename_lock);
2937  }
2938  
2939  /**
2940   * d_ancestor - search for an ancestor
2941   * @p1: ancestor dentry
2942   * @p2: child dentry
2943   *
2944   * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2945   * an ancestor of p2, else NULL.
2946   */
2947  struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2948  {
2949  	struct dentry *p;
2950  
2951  	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2952  		if (p->d_parent == p1)
2953  			return p;
2954  	}
2955  	return NULL;
2956  }
2957  
2958  /*
2959   * This helper attempts to cope with remotely renamed directories
2960   *
2961   * It assumes that the caller is already holding
2962   * dentry->d_parent->d_inode->i_mutex, and rename_lock
2963   *
2964   * Note: If ever the locking in lock_rename() changes, then please
2965   * remember to update this too...
2966   */
2967  static int __d_unalias(struct inode *inode,
2968  		struct dentry *dentry, struct dentry *alias)
2969  {
2970  	struct mutex *m1 = NULL;
2971  	struct rw_semaphore *m2 = NULL;
2972  	int ret = -ESTALE;
2973  
2974  	/* If alias and dentry share a parent, then no extra locks required */
2975  	if (alias->d_parent == dentry->d_parent)
2976  		goto out_unalias;
2977  
2978  	/* See lock_rename() */
2979  	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2980  		goto out_err;
2981  	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2982  	if (!inode_trylock_shared(alias->d_parent->d_inode))
2983  		goto out_err;
2984  	m2 = &alias->d_parent->d_inode->i_rwsem;
2985  out_unalias:
2986  	__d_move(alias, dentry, false);
2987  	ret = 0;
2988  out_err:
2989  	if (m2)
2990  		up_read(m2);
2991  	if (m1)
2992  		mutex_unlock(m1);
2993  	return ret;
2994  }
2995  
2996  /**
2997   * d_splice_alias - splice a disconnected dentry into the tree if one exists
2998   * @inode:  the inode which may have a disconnected dentry
2999   * @dentry: a negative dentry which we want to point to the inode.
3000   *
3001   * If inode is a directory and has an IS_ROOT alias, then d_move that in
3002   * place of the given dentry and return it, else simply d_add the inode
3003   * to the dentry and return NULL.
3004   *
3005   * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3006   * we should error out: directories can't have multiple aliases.
3007   *
3008   * This is needed in the lookup routine of any filesystem that is exportable
3009   * (via knfsd) so that we can build dcache paths to directories effectively.
3010   *
3011   * If a dentry was found and moved, then it is returned.  Otherwise NULL
3012   * is returned.  This matches the expected return value of ->lookup.
3013   *
3014   * Cluster filesystems may call this function with a negative, hashed dentry.
3015   * In that case, we know that the inode will be a regular file, and that this
3016   * will only occur during atomic_open. So we need to check for the dentry
3017   * being already hashed only in the final case.
3018   */
3019  struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
3020  {
3021  	if (IS_ERR(inode))
3022  		return ERR_CAST(inode);
3023  
3024  	BUG_ON(!d_unhashed(dentry));
3025  
3026  	if (!inode)
3027  		goto out;
3028  
3029  	security_d_instantiate(dentry, inode);
3030  	spin_lock(&inode->i_lock);
3031  	if (S_ISDIR(inode->i_mode)) {
3032  		struct dentry *new = __d_find_any_alias(inode);
3033  		if (unlikely(new)) {
3034  			/* The reference to new ensures it remains an alias */
3035  			spin_unlock(&inode->i_lock);
3036  			write_seqlock(&rename_lock);
3037  			if (unlikely(d_ancestor(new, dentry))) {
3038  				write_sequnlock(&rename_lock);
3039  				dput(new);
3040  				new = ERR_PTR(-ELOOP);
3041  				pr_warn_ratelimited(
3042  					"VFS: Lookup of '%s' in %s %s"
3043  					" would have caused loop\n",
3044  					dentry->d_name.name,
3045  					inode->i_sb->s_type->name,
3046  					inode->i_sb->s_id);
3047  			} else if (!IS_ROOT(new)) {
3048  				struct dentry *old_parent = dget(new->d_parent);
3049  				int err = __d_unalias(inode, dentry, new);
3050  				write_sequnlock(&rename_lock);
3051  				if (err) {
3052  					dput(new);
3053  					new = ERR_PTR(err);
3054  				}
3055  				dput(old_parent);
3056  			} else {
3057  				__d_move(new, dentry, false);
3058  				write_sequnlock(&rename_lock);
3059  			}
3060  			iput(inode);
3061  			return new;
3062  		}
3063  	}
3064  out:
3065  	__d_add(dentry, inode);
3066  	return NULL;
3067  }
3068  EXPORT_SYMBOL(d_splice_alias);
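
/*
 * Usage sketch (illustrative only; the example_* names are placeholders):
 * the canonical tail of an exportable filesystem's ->lookup():
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *			struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = NULL;
 *		ino_t ino = example_inode_by_name(dir, &dentry->d_name);
 *
 *		if (ino)
 *			inode = example_iget(dir->i_sb, ino);
 *		return d_splice_alias(inode, dentry);	// NULL inode -> negative
 *	}
 */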
3069  
3070  /*
3071   * Test whether new_dentry is a subdirectory of old_dentry.
3072   *
3073   * Trivially implemented using the dcache structure
3074   */
3075  
3076  /**
3077   * is_subdir - is new dentry a subdirectory of old_dentry
3078   * @new_dentry: new dentry
3079   * @old_dentry: old dentry
3080   *
3081   * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3082   * Returns false otherwise.
3083   * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3084   */
3085  
3086  bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3087  {
3088  	bool result;
3089  	unsigned seq;
3090  
3091  	if (new_dentry == old_dentry)
3092  		return true;
3093  
3094  	do {
3095  		/* for restarting inner loop in case of seq retry */
3096  		seq = read_seqbegin(&rename_lock);
3097  		/*
3098  		 * Need rcu_read_lock() to protect against d_parent changing
3099  		 * under us due to d_move()
3100  		 */
3101  		rcu_read_lock();
3102  		if (d_ancestor(old_dentry, new_dentry))
3103  			result = true;
3104  		else
3105  			result = false;
3106  		rcu_read_unlock();
3107  	} while (read_seqretry(&rename_lock, seq));
3108  
3109  	return result;
3110  }
3111  EXPORT_SYMBOL(is_subdir);
3112  
3113  static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3114  {
3115  	struct dentry *root = data;
3116  	if (dentry != root) {
3117  		if (d_unhashed(dentry) || !dentry->d_inode)
3118  			return D_WALK_SKIP;
3119  
3120  		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3121  			dentry->d_flags |= DCACHE_GENOCIDE;
3122  			dentry->d_lockref.count--;
3123  		}
3124  	}
3125  	return D_WALK_CONTINUE;
3126  }
3127  
3128  void d_genocide(struct dentry *parent)
3129  {
3130  	d_walk(parent, parent, d_genocide_kill);
3131  }
3132  
3133  EXPORT_SYMBOL(d_genocide);
3134  
3135  void d_tmpfile(struct dentry *dentry, struct inode *inode)
3136  {
3137  	inode_dec_link_count(inode);
3138  	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3139  		!hlist_unhashed(&dentry->d_u.d_alias) ||
3140  		!d_unlinked(dentry));
3141  	spin_lock(&dentry->d_parent->d_lock);
3142  	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3143  	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3144  				(unsigned long long)inode->i_ino);
3145  	spin_unlock(&dentry->d_lock);
3146  	spin_unlock(&dentry->d_parent->d_lock);
3147  	d_instantiate(dentry, inode);
3148  }
3149  EXPORT_SYMBOL(d_tmpfile);
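
/*
 * Usage sketch (illustrative only; example_new_inode() is a placeholder):
 * an ->tmpfile() method creates a fully set up inode with i_nlink == 1 and
 * lets d_tmpfile() drop that link and attach the "#<ino>" name:
 *
 *	static int example_tmpfile(struct inode *dir, struct dentry *dentry,
 *				   umode_t mode)
 *	{
 *		struct inode *inode = example_new_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */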
3150  
3151  static __initdata unsigned long dhash_entries;
3152  static int __init set_dhash_entries(char *str)
3153  {
3154  	if (!str)
3155  		return 0;
3156  	dhash_entries = simple_strtoul(str, &str, 0);
3157  	return 1;
3158  }
3159  __setup("dhash_entries=", set_dhash_entries);
3160  
3161  static void __init dcache_init_early(void)
3162  {
3163  	/* If hashes are distributed across NUMA nodes, defer
3164  	 * hash allocation until vmalloc space is available.
3165  	 */
3166  	if (hashdist)
3167  		return;
3168  
3169  	dentry_hashtable =
3170  		alloc_large_system_hash("Dentry cache",
3171  					sizeof(struct hlist_bl_head),
3172  					dhash_entries,
3173  					13,
3174  					HASH_EARLY | HASH_ZERO,
3175  					&d_hash_shift,
3176  					NULL,
3177  					0,
3178  					0);
3179  	d_hash_shift = 32 - d_hash_shift;
3180  }
3181  
3182  static void __init dcache_init(void)
3183  {
3184  	/*
3185  	 * A constructor could be added for stable state like the lists,
3186  	 * but it is probably not worth it because of the cache nature
3187  	 * of the dcache.
3188  	 */
3189  	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3190  		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
3191  		d_iname);
3192  
3193  	/* Hash may have been set up in dcache_init_early */
3194  	if (!hashdist)
3195  		return;
3196  
3197  	dentry_hashtable =
3198  		alloc_large_system_hash("Dentry cache",
3199  					sizeof(struct hlist_bl_head),
3200  					dhash_entries,
3201  					13,
3202  					HASH_ZERO,
3203  					&d_hash_shift,
3204  					NULL,
3205  					0,
3206  					0);
3207  	d_hash_shift = 32 - d_hash_shift;
3208  }
3209  
3210  /* SLAB cache for __getname() consumers */
3211  struct kmem_cache *names_cachep __read_mostly;
3212  EXPORT_SYMBOL(names_cachep);
3213  
3214  void __init vfs_caches_init_early(void)
3215  {
3216  	int i;
3217  
3218  	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3219  		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3220  
3221  	dcache_init_early();
3222  	inode_init_early();
3223  }
3224  
3225  void __init vfs_caches_init(void)
3226  {
3227  	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3228  			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3229  
3230  	dcache_init();
3231  	inode_init();
3232  	files_init();
3233  	files_maxfiles_init();
3234  	mnt_init();
3235  	bdev_cache_init();
3236  	chrdev_init();
3237  }
3238