/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
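
/*
 * Illustrative sketch (not part of this file, hypothetical "some_inode"):
 * the ordering rules above mean nested acquisition always goes outer lock
 * first, e.g. when walking the per-sb inode list and touching i_state:
 */
#if 0
	spin_lock(&inode_sb_list_lock);		/* outer: sb list lock */
	spin_lock(&some_inode->i_lock);		/* inner: per-inode lock */
	/* ... inspect/update i_state or i_sb_list ... */
	spin_unlock(&some_inode->i_lock);
	spin_unlock(&inode_sb_list_lock);
#endif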

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);
static DEFINE_PER_CPU(unsigned int, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

int get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}
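
/*
 * Illustrative sketch of the lockless per-cpu counter pattern used by
 * nr_inodes/nr_unused above: writers only touch their local counter via
 * this_cpu_inc()/this_cpu_dec(), readers sum all cpus and clamp, trading
 * exactness for the absence of a shared, contended cacheline.  The
 * "nr_widgets" names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, nr_widgets);

static int get_nr_widgets(void)
{
	int i, sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_widgets, i);
	/* inc and dec may land on different cpus, so clamp transients */
	return sum < 0 ? 0 : sum;
}
#endif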

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
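
/*
 * Illustrative sketch: a filesystem that embeds struct inode in its own
 * per-inode structure and allocates outside the normal alloc_inode() path
 * can reuse inode_init_always() for the generic fields.  "myfs_inode" and
 * "myfs_inode_cache" are hypothetical; the cache's constructor is assumed
 * to have run inode_init_once() on the embedded inode.
 */
#if 0
struct myfs_inode {
	unsigned long	private_state;
	struct inode	vfs_inode;
};

static struct inode *myfs_alloc_inode(struct super_block *sb)
{
	struct myfs_inode *mi = kmem_cache_alloc(myfs_inode_cache, GFP_KERNEL);

	if (!mi)
		return NULL;
	if (inode_init_always(sb, &mi->vfs_inode)) {
		kmem_cache_free(myfs_inode_cache, mi);
		return NULL;
	}
	mi->private_state = 0;
	return &mi->vfs_inode;
}
#endif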

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}
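
/*
 * Illustrative sketch: a filesystem with its own inode cache mirrors the
 * call_rcu() dance above so that lockless (RCU-walk) path lookups never
 * see the memory recycled under them.  The "myfs_" names are hypothetical.
 */
#if 0
static void myfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(myfs_inode_cache,
			container_of(inode, struct myfs_inode, vfs_inode));
}

static void myfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, myfs_i_callback);
}
#endif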

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let slab know about that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
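
/*
 * Illustrative sketch of the two reference-taking rules above: __iget()
 * is only valid under i_lock on an inode known not to be on its way out
 * (compare find_inode()), while ihold() merely piggybacks on a reference
 * the caller already owns.
 */
#if 0
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING | I_WILL_FREE)))
		__iget(inode);			/* new ref, under i_lock */
	spin_unlock(&inode->i_lock);

	ihold(inode);				/* extra ref; one already held */
	/* ... */
	iput(inode);
#endif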

static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
		inode->i_sb->s_nr_inodes_unused++;
		this_cpu_inc(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inode->i_sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);

void end_writeback(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can still be in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	inode_wb_list_del(inode);
	inode_sb_list_del(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because we
 * do lazy LRU updates to minimise lock contention, so the LRU does not have
 * strict ordering. Hence we don't want to reclaim inodes with this flag set
 * because they are the inodes that are out of order.
 */
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	spin_lock(&sb->s_inode_lru_lock);
	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;

		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			sb->s_nr_inodes_unused--;
			this_cpu_dec(nr_unused);
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &sb->s_inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&sb->s_inode_lru_lock);

			if (inode != list_entry(sb->s_inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&sb->s_inode_lru_lock);

	dispose_list(&freeable);
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
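
/*
 * Illustrative sketch: pseudo filesystems with no stable on-disk inode
 * numbers typically stamp new inodes from this counter.  "myfs_get_inode"
 * is hypothetical.
 */
#if 0
static struct inode *myfs_get_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();	/* cheap, approximately unique */
		inode->i_mode = mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime =
			current_fs_time(sb);
	}
	return inode;
}
#endif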

/**
 *	new_inode_pseudo 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock.  The inode won't be
 *	chained on the superblock's s_inodes list.  This means:
 *	- the fs can't be unmounted
 *	- quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
#endif
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
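
/*
 * Illustrative sketch of the iget5_locked() calling convention: @test and
 * @set receive @data and must not sleep (inode_hash_lock is held).  The
 * key structure and all "myfs_" names are hypothetical.
 */
#if 0
struct myfs_ikey { u64 objid; };

static int myfs_itest(struct inode *inode, void *data)
{
	return MYFS_I(inode)->objid == ((struct myfs_ikey *)data)->objid;
}

static int myfs_iset(struct inode *inode, void *data)
{
	MYFS_I(inode)->objid = ((struct myfs_ikey *)data)->objid;
	return 0;
}

static struct inode *myfs_iget5(struct super_block *sb, u64 objid)
{
	struct myfs_ikey key = { .objid = objid };
	struct inode *inode;

	inode = iget5_locked(sb, (unsigned long)objid, myfs_itest,
			     myfs_iset, &key);
	if (inode && (inode->i_state & I_NEW)) {
		/* ... fill in from backing store ... */
		unlock_new_inode(inode);
	}
	return inode;
}
#endif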

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
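
/*
 * Illustrative sketch of the canonical iget_locked() pattern: a freshly
 * allocated inode comes back with I_NEW set and must be filled in and
 * released with unlock_new_inode() (or discarded via iget_failed()).
 * "myfs_read_inode" is hypothetical.
 */
#if 0
static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* cache hit, initialised */

	if (myfs_read_inode(inode)) {		/* fill from disk */
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}
#endif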

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
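
/*
 * Illustrative sketch: a filesystem with no natural inode numbering draws
 * numbers above its reserved range like this ("MYFS_ROOT_INO" is a
 * hypothetical constant).
 */
#if 0
	inode->i_ino = iunique(sb, MYFS_ROOT_INO);
#endif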

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
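
/*
 * Illustrative sketch: igrab() is the safe way to take a reference from a
 * context that may race with eviction; a NULL return means the inode was
 * already being freed and must not be touched.
 */
#if 0
	struct inode *pinned = igrab(inode);

	if (pinned) {
		/* ... inode cannot be freed until the matching iput() ... */
		iput(pinned);
	}
#endif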

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
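
/*
 * Illustrative sketch: inode-creation paths allocate first and then use
 * insert_inode_locked() to claim the (sb, ino) slot; -EBUSY means another
 * live inode owns that number and the new one must be thrown away.
 */
#if 0
	inode->i_ino = ino;
	if (insert_inode_locked(inode) < 0) {
		iput(inode);			/* number already in use */
		return ERR_PTR(-EBUSY);
	}
	/* ... initialise on-disk state, then unlock_new_inode(inode) ... */
#endif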

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
int generic_drop_inode(struct inode *inode)
{
	return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, and sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			inode_lru_list_add(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the number of the disk block that holds the requested block
 *	of the file, relative to the start of the device holding the inode.
 *	That is, if asked for block 4 of inode 1, the function returns the
 *	disk block, relative to the start of the disk, that holds that block
 *	of the file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/**
 *	touch_atime	-	update the access time
 *	@mnt: mount the inode is accessed on
 *	@dentry: dentry accessed
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (mnt_want_write(mnt))
		return;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore updates via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.
 */

void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking the inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
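
/*
 * Example: the table size can also be pinned from the kernel command line,
 * e.g. booting with "ihash_entries=131072" instead of relying on the
 * memory-size based default (the value is still rounded by
 * alloc_large_system_hash()).
 */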

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
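
/*
 * Illustrative sketch: a filesystem's mknod path typically ends with
 * init_special_inode() so that opening the node is routed to the right
 * driver ops.  "myfs_mknod" and "myfs_get_inode" are hypothetical.
 */
#if 0
static int myfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
		      dev_t rdev)
{
	struct inode *inode = myfs_get_inode(dir->i_sb, mode);

	if (!inode)
		return -ENOMEM;
	init_special_inode(inode, inode->i_mode, rdev);
	mark_inode_dirty(inode);
	d_instantiate(dentry, inode);
	return 0;
}
#endif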

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
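
/*
 * Illustrative sketch: create/mkdir paths call inode_init_owner() right
 * after allocation so that setgid directories propagate their group as
 * implemented above ("dir" and "mode" come from the hypothetical caller).
 */
#if 0
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	inode->i_ino = get_next_ino();
	inode_init_owner(inode, dir, mode);
#endif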

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in the inode's user
 * namespace, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns = inode_userns(inode);

	if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
		return true;
	if (ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);
1671