/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
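
/*
 * Illustrative sketch (not part of the original file): code that walks the
 * hash and then takes a per-inode lock must respect the ordering above,
 * e.g.
 *
 *	spin_lock(&inode_hash_lock);
 *	hlist_for_each_entry(inode, head, i_hash) {
 *		spin_lock(&inode->i_lock);	(nests inside the hash lock)
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_hash_lock);
 *
 * Taking inode_hash_lock while already holding inode->i_lock would invert
 * the documented order and risk deadlock.
 */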

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
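
/*
 * Hedged usage sketch (the helper and struct names are hypothetical, not
 * from this file): a filesystem loading an on-disk inode should go through
 * set_nlink() instead of assigning i_nlink directly, so the s_remove_count
 * bookkeeping above stays balanced:
 *
 *	static void example_read_inode(struct inode *inode,
 *				       struct example_disk_inode *raw)
 *	{
 *		set_nlink(inode, le32_to_cpu(raw->nlink));
 *	}
 *
 * An unlink path would likewise call drop_nlink() (plus the usual
 * mark_inode_dirty()) rather than decrementing the field by hand.
 */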

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * Get an additional reference to the inode; the caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}


static void inode_lru_list_del(struct inode *inode)
{

	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can still be in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status
inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_del_init(&inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_move(&inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
				       &freeable, &nr_to_scan);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
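
/*
 * Hedged example (an assumed caller, not part of this file): pseudo
 * filesystems with no stable on-disk inode numbers typically do
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * so that each CPU hands out numbers from its private LAST_INO_BATCH-sized
 * window and the shared counter cache line is touched only once per batch.
 */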

/**
 *	new_inode_pseudo 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock.
 *	The inode will not be chained on the superblock's s_inodes list.
 *	This means:
 *	- the fs can't be unmounted
 *	- quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
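
/*
 * Hedged sketch of the override mentioned above: a filesystem whose page
 * cache pages must not live in highmem or be migrated could do something
 * like (GFP_USER is just one plausible mask):
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */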

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
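
/*
 * Hedged usage sketch: because the pair is ordered by address before
 * locking, callers that operate on two regular files at once need not
 * worry about lock ordering themselves:
 *
 *	lock_two_nondirectories(src, dst);
 *	... operate on both inodes ...
 *	unlock_two_nondirectories(src, dst);
 */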

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
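
/*
 * Hedged example of the calling convention (the types and helpers here are
 * hypothetical, not from this file). @test and @set run under
 * inode_hash_lock, so neither may sleep:
 *
 *	static int example_test(struct inode *inode, void *data)
 *	{
 *		return EXAMPLE_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int example_set(struct inode *inode, void *data)
 *	{
 *		EXAMPLE_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(id, 32), example_test,
 *			     example_set, &id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in from backing store ...
 *		unlock_new_inode(inode);
 *	}
 */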

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
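
/*
 * Hedged sketch of the usual lookup pattern in a filesystem's iget helper
 * (example_read_inode() is hypothetical):
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;			(cached and fully set up)
 *	err = example_read_inode(inode);	(fill from backing store)
 *	if (err) {
 *		iget_failed(inode);		(unlocks, unhashes and puts)
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */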

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
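
/*
 * Hedged usage note: a synthetic filesystem that reserves low inode
 * numbers for fixed objects might allocate the rest with
 *
 *	inode->i_ino = iunique(sb, EXAMPLE_MAX_RESERVED);
 *
 * where EXAMPLE_MAX_RESERVED is a hypothetical constant naming its
 * highest reserved number.
 */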

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, time, flags);

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}

/**
 *	touch_atime	-	update the access time
 *	@path: the &struct path to update
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = path->dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt))
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore update via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.  This can return an error for
 *	file systems that need to allocate space in order to update an inode.
 */

int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
1704  * to recheck inode state.
1705  *
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
1814  * @inode: New inode
1815  * @dir: Directory inode
1816  * @mode: mode of the new inode
1817  */
1818 void inode_init_owner(struct inode *inode, const struct inode *dir,
1819 			umode_t mode)
1820 {
1821 	inode->i_uid = current_fsuid();
1822 	if (dir && dir->i_mode & S_ISGID) {
1823 		inode->i_gid = dir->i_gid;
1824 		if (S_ISDIR(mode))
1825 			mode |= S_ISGID;
1826 	} else
1827 		inode->i_gid = current_fsgid();
1828 	inode->i_mode = mode;
1829 }
1830 EXPORT_SYMBOL(inode_init_owner);
1831 
1832 /**
1833  * inode_owner_or_capable - check current task permissions to inode
1834  * @inode: inode being checked
1835  *
1836  * Return true if current either has CAP_FOWNER in a namespace with the
1837  * inode owner uid mapped, or owns the file.
1838  */
1839 bool inode_owner_or_capable(const struct inode *inode)
1840 {
1841 	struct user_namespace *ns;
1842 
1843 	if (uid_eq(current_fsuid(), inode->i_uid))
1844 		return true;
1845 
1846 	ns = current_user_ns();
1847 	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
1848 		return true;
1849 	return false;
1850 }
1851 EXPORT_SYMBOL(inode_owner_or_capable);
1852 
1853 /*
1854  * Direct i/o helper functions
1855  */
1856 static void __inode_dio_wait(struct inode *inode)
1857 {
1858 	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
1859 	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
1860 
1861 	do {
1862 		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
1863 		if (atomic_read(&inode->i_dio_count))
1864 			schedule();
1865 	} while (atomic_read(&inode->i_dio_count));
1866 	finish_wait(wq, &q.wait);
1867 }
1868 
1869 /**
1870  * inode_dio_wait - wait for outstanding DIO requests to finish
1871  * @inode: inode to wait for
1872  *
1873  * Waits for all pending direct I/O requests to finish so that we can
1874  * proceed with a truncate or equivalent operation.
1875  *
1876  * Must be called under a lock that serializes taking new references
1877  * to i_dio_count, usually by inode->i_mutex.
1878  */
1879 void inode_dio_wait(struct inode *inode)
1880 {
1881 	if (atomic_read(&inode->i_dio_count))
1882 		__inode_dio_wait(inode);
1883 }
1884 EXPORT_SYMBOL(inode_dio_wait);
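
/*
 * Hedged sketch: a truncate-like path would typically drain direct I/O
 * while holding i_mutex, since that is what serializes new i_dio_count
 * references:
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);
 *	... shrink the file ...
 *	mutex_unlock(&inode->i_mutex);
 */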

/*
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today --- for example,
 * __generic_file_aio_write() calls file_remove_suid() without holding
 * i_mutex --- so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
1930