xref: /openbmc/linux/fs/inode.c (revision e0f6d1a5)
1 /*
2  * (C) 1997 Linus Torvalds
3  * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
4  */
5 #include <linux/export.h>
6 #include <linux/fs.h>
7 #include <linux/mm.h>
8 #include <linux/backing-dev.h>
9 #include <linux/hash.h>
10 #include <linux/swap.h>
11 #include <linux/security.h>
12 #include <linux/cdev.h>
13 #include <linux/bootmem.h>
14 #include <linux/fsnotify.h>
15 #include <linux/mount.h>
16 #include <linux/posix_acl.h>
17 #include <linux/prefetch.h>
18 #include <linux/buffer_head.h> /* for inode_has_buffers */
19 #include <linux/ratelimit.h>
20 #include <linux/list_lru.h>
21 #include <linux/iversion.h>
22 #include <trace/events/writeback.h>
23 #include "internal.h"
24 
25 /*
26  * Inode locking rules:
27  *
28  * inode->i_lock protects:
29  *   inode->i_state, inode->i_hash, __iget()
30  * Inode LRU list locks protect:
31  *   inode->i_sb->s_inode_lru, inode->i_lru
32  * inode->i_sb->s_inode_list_lock protects:
33  *   inode->i_sb->s_inodes, inode->i_sb_list
34  * bdi->wb.list_lock protects:
35  *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
36  * inode_hash_lock protects:
37  *   inode_hashtable, inode->i_hash
38  *
39  * Lock ordering:
40  *
41  * inode->i_sb->s_inode_list_lock
42  *   inode->i_lock
43  *     Inode LRU list locks
44  *
45  * bdi->wb.list_lock
46  *   inode->i_lock
47  *
48  * inode_hash_lock
49  *   inode->i_sb->s_inode_list_lock
50  *   inode->i_lock
51  *
52  * iunique_lock
53  *   inode_hash_lock
54  */
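
/*
 * Illustrative sketch (not part of the original file): taking the locks
 * in the documented order, e.g. when walking a superblock's inode list:
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 *
 * Taking s_inode_list_lock while already holding an i_lock would invert
 * the documented ordering and risk an ABBA deadlock.
 */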
55 
56 static unsigned int i_hash_mask __read_mostly;
57 static unsigned int i_hash_shift __read_mostly;
58 static struct hlist_head *inode_hashtable __read_mostly;
59 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
60 
61 /*
62  * Empty aops. Can be used for the cases where the user does not
63  * define any of the address_space operations.
64  */
65 const struct address_space_operations empty_aops = {
66 };
67 EXPORT_SYMBOL(empty_aops);
68 
69 /*
70  * Statistics gathering..
71  */
72 struct inodes_stat_t inodes_stat;
73 
74 static DEFINE_PER_CPU(unsigned long, nr_inodes);
75 static DEFINE_PER_CPU(unsigned long, nr_unused);
76 
77 static struct kmem_cache *inode_cachep __read_mostly;
78 
79 static long get_nr_inodes(void)
80 {
81 	int i;
82 	long sum = 0;
83 	for_each_possible_cpu(i)
84 		sum += per_cpu(nr_inodes, i);
85 	return sum < 0 ? 0 : sum;
86 }
87 
88 static inline long get_nr_inodes_unused(void)
89 {
90 	int i;
91 	long sum = 0;
92 	for_each_possible_cpu(i)
93 		sum += per_cpu(nr_unused, i);
94 	return sum < 0 ? 0 : sum;
95 }
96 
97 long get_nr_dirty_inodes(void)
98 {
99 	/* not actually dirty inodes, but a wild approximation */
100 	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
101 	return nr_dirty > 0 ? nr_dirty : 0;
102 }
103 
104 /*
105  * Handle nr_inodes sysctl
106  */
107 #ifdef CONFIG_SYSCTL
108 int proc_nr_inodes(struct ctl_table *table, int write,
109 		   void __user *buffer, size_t *lenp, loff_t *ppos)
110 {
111 	inodes_stat.nr_inodes = get_nr_inodes();
112 	inodes_stat.nr_unused = get_nr_inodes_unused();
113 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
114 }
115 #endif
116 
117 static int no_open(struct inode *inode, struct file *file)
118 {
119 	return -ENXIO;
120 }
121 
122 /**
123  * inode_init_always - perform inode structure initialisation
124  * @sb: superblock inode belongs to
125  * @inode: inode to initialise
126  *
127  * These are initializations that need to be done on every inode
128  * allocation as the fields are not initialised by slab allocation.
129  */
130 int inode_init_always(struct super_block *sb, struct inode *inode)
131 {
132 	static const struct inode_operations empty_iops;
133 	static const struct file_operations no_open_fops = {.open = no_open};
134 	struct address_space *const mapping = &inode->i_data;
135 
136 	inode->i_sb = sb;
137 	inode->i_blkbits = sb->s_blocksize_bits;
138 	inode->i_flags = 0;
139 	atomic_set(&inode->i_count, 1);
140 	inode->i_op = &empty_iops;
141 	inode->i_fop = &no_open_fops;
142 	inode->__i_nlink = 1;
143 	inode->i_opflags = 0;
144 	if (sb->s_xattr)
145 		inode->i_opflags |= IOP_XATTR;
146 	i_uid_write(inode, 0);
147 	i_gid_write(inode, 0);
148 	atomic_set(&inode->i_writecount, 0);
149 	inode->i_size = 0;
150 	inode->i_write_hint = WRITE_LIFE_NOT_SET;
151 	inode->i_blocks = 0;
152 	inode->i_bytes = 0;
153 	inode->i_generation = 0;
154 	inode->i_pipe = NULL;
155 	inode->i_bdev = NULL;
156 	inode->i_cdev = NULL;
157 	inode->i_link = NULL;
158 	inode->i_dir_seq = 0;
159 	inode->i_rdev = 0;
160 	inode->dirtied_when = 0;
161 
162 #ifdef CONFIG_CGROUP_WRITEBACK
163 	inode->i_wb_frn_winner = 0;
164 	inode->i_wb_frn_avg_time = 0;
165 	inode->i_wb_frn_history = 0;
166 #endif
167 
168 	if (security_inode_alloc(inode))
169 		goto out;
170 	spin_lock_init(&inode->i_lock);
171 	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
172 
173 	init_rwsem(&inode->i_rwsem);
174 	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
175 
176 	atomic_set(&inode->i_dio_count, 0);
177 
178 	mapping->a_ops = &empty_aops;
179 	mapping->host = inode;
180 	mapping->flags = 0;
181 	atomic_set(&mapping->i_mmap_writable, 0);
182 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
183 	mapping->private_data = NULL;
184 	mapping->writeback_index = 0;
185 	inode->i_private = NULL;
186 	inode->i_mapping = mapping;
187 	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
188 #ifdef CONFIG_FS_POSIX_ACL
189 	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
190 #endif
191 
192 #ifdef CONFIG_FSNOTIFY
193 	inode->i_fsnotify_mask = 0;
194 #endif
195 	inode->i_flctx = NULL;
196 	this_cpu_inc(nr_inodes);
197 
198 	return 0;
199 out:
200 	return -ENOMEM;
201 }
202 EXPORT_SYMBOL(inode_init_always);
203 
204 static struct inode *alloc_inode(struct super_block *sb)
205 {
206 	struct inode *inode;
207 
208 	if (sb->s_op->alloc_inode)
209 		inode = sb->s_op->alloc_inode(sb);
210 	else
211 		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
212 
213 	if (!inode)
214 		return NULL;
215 
216 	if (unlikely(inode_init_always(sb, inode))) {
217 		if (inode->i_sb->s_op->destroy_inode)
218 			inode->i_sb->s_op->destroy_inode(inode);
219 		else
220 			kmem_cache_free(inode_cachep, inode);
221 		return NULL;
222 	}
223 
224 	return inode;
225 }
226 
227 void free_inode_nonrcu(struct inode *inode)
228 {
229 	kmem_cache_free(inode_cachep, inode);
230 }
231 EXPORT_SYMBOL(free_inode_nonrcu);
232 
233 void __destroy_inode(struct inode *inode)
234 {
235 	BUG_ON(inode_has_buffers(inode));
236 	inode_detach_wb(inode);
237 	security_inode_free(inode);
238 	fsnotify_inode_delete(inode);
239 	locks_free_lock_context(inode);
240 	if (!inode->i_nlink) {
241 		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
242 		atomic_long_dec(&inode->i_sb->s_remove_count);
243 	}
244 
245 #ifdef CONFIG_FS_POSIX_ACL
246 	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
247 		posix_acl_release(inode->i_acl);
248 	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
249 		posix_acl_release(inode->i_default_acl);
250 #endif
251 	this_cpu_dec(nr_inodes);
252 }
253 EXPORT_SYMBOL(__destroy_inode);
254 
255 static void i_callback(struct rcu_head *head)
256 {
257 	struct inode *inode = container_of(head, struct inode, i_rcu);
258 	kmem_cache_free(inode_cachep, inode);
259 }
260 
261 static void destroy_inode(struct inode *inode)
262 {
263 	BUG_ON(!list_empty(&inode->i_lru));
264 	__destroy_inode(inode);
265 	if (inode->i_sb->s_op->destroy_inode)
266 		inode->i_sb->s_op->destroy_inode(inode);
267 	else
268 		call_rcu(&inode->i_rcu, i_callback);
269 }
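
/*
 * Illustrative sketch (not from the original file): the usual way a
 * filesystem hooks into alloc_inode()/destroy_inode() above is to embed
 * the VFS inode in its own structure.  struct foo_inode_info, FOO_I()
 * and foo_inode_cachep are hypothetical names:
 *
 *	static struct inode *foo_alloc_inode(struct super_block *sb)
 *	{
 *		struct foo_inode_info *fi;
 *
 *		fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 *
 *	static void foo_i_callback(struct rcu_head *head)
 *	{
 *		struct inode *inode = container_of(head, struct inode, i_rcu);
 *
 *		kmem_cache_free(foo_inode_cachep, FOO_I(inode));
 *	}
 *
 *	static void foo_destroy_inode(struct inode *inode)
 *	{
 *		call_rcu(&inode->i_rcu, foo_i_callback);
 *	}
 */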
270 
271 /**
272  * drop_nlink - directly drop an inode's link count
273  * @inode: inode
274  *
275  * This is a low-level filesystem helper to replace any
276  * direct filesystem manipulation of i_nlink.  In cases
277  * where we are attempting to track writes to the
278  * filesystem, a decrement to zero means an imminent
279  * write when the file is truncated and actually unlinked
280  * on the filesystem.
281  */
282 void drop_nlink(struct inode *inode)
283 {
284 	WARN_ON(inode->i_nlink == 0);
285 	inode->__i_nlink--;
286 	if (!inode->i_nlink)
287 		atomic_long_inc(&inode->i_sb->s_remove_count);
288 }
289 EXPORT_SYMBOL(drop_nlink);
290 
291 /**
292  * clear_nlink - directly zero an inode's link count
293  * @inode: inode
294  *
295  * This is a low-level filesystem helper to replace any
296  * direct filesystem manipulation of i_nlink.  See
297  * drop_nlink() for why we care about i_nlink hitting zero.
298  */
299 void clear_nlink(struct inode *inode)
300 {
301 	if (inode->i_nlink) {
302 		inode->__i_nlink = 0;
303 		atomic_long_inc(&inode->i_sb->s_remove_count);
304 	}
305 }
306 EXPORT_SYMBOL(clear_nlink);
307 
308 /**
309  * set_nlink - directly set an inode's link count
310  * @inode: inode
311  * @nlink: new nlink (should be non-zero)
312  *
313  * This is a low-level filesystem helper to replace any
314  * direct filesystem manipulation of i_nlink.
315  */
316 void set_nlink(struct inode *inode, unsigned int nlink)
317 {
318 	if (!nlink) {
319 		clear_nlink(inode);
320 	} else {
321 		/* Yes, some filesystems do change nlink from zero to one */
322 		if (inode->i_nlink == 0)
323 			atomic_long_dec(&inode->i_sb->s_remove_count);
324 
325 		inode->__i_nlink = nlink;
326 	}
327 }
328 EXPORT_SYMBOL(set_nlink);
329 
330 /**
331  * inc_nlink - directly increment an inode's link count
332  * @inode: inode
333  *
334  * This is a low-level filesystem helper to replace any
335  * direct filesystem manipulation of i_nlink.  Currently,
336  * it is only here for parity with dec_nlink().
337  */
338 void inc_nlink(struct inode *inode)
339 {
340 	if (unlikely(inode->i_nlink == 0)) {
341 		WARN_ON(!(inode->i_state & I_LINKABLE));
342 		atomic_long_dec(&inode->i_sb->s_remove_count);
343 	}
344 
345 	inode->__i_nlink++;
346 }
347 EXPORT_SYMBOL(inc_nlink);
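
/*
 * Illustrative sketch (not from the original file): a filesystem's
 * unlink path uses these helpers rather than poking i_nlink directly.
 * foo_unlink() and foo_delete_entry() are hypothetical:
 *
 *	static int foo_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int err = foo_delete_entry(dir, dentry);
 *
 *		if (!err) {
 *			inode->i_ctime = dir->i_ctime;
 *			drop_nlink(inode);
 *		}
 *		return err;
 *	}
 */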
348 
349 static void __address_space_init_once(struct address_space *mapping)
350 {
351 	INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT);
352 	init_rwsem(&mapping->i_mmap_rwsem);
353 	INIT_LIST_HEAD(&mapping->private_list);
354 	spin_lock_init(&mapping->private_lock);
355 	mapping->i_mmap = RB_ROOT_CACHED;
356 }
357 
358 void address_space_init_once(struct address_space *mapping)
359 {
360 	memset(mapping, 0, sizeof(*mapping));
361 	__address_space_init_once(mapping);
362 }
363 EXPORT_SYMBOL(address_space_init_once);
364 
365 /*
366  * These are initializations that only need to be done
367  * once, because the fields are idempotent across use
368  * of the inode, so let the slab cache be aware of that.
369  */
370 void inode_init_once(struct inode *inode)
371 {
372 	memset(inode, 0, sizeof(*inode));
373 	INIT_HLIST_NODE(&inode->i_hash);
374 	INIT_LIST_HEAD(&inode->i_devices);
375 	INIT_LIST_HEAD(&inode->i_io_list);
376 	INIT_LIST_HEAD(&inode->i_wb_list);
377 	INIT_LIST_HEAD(&inode->i_lru);
378 	__address_space_init_once(&inode->i_data);
379 	i_size_ordered_init(inode);
380 }
381 EXPORT_SYMBOL(inode_init_once);
382 
383 static void init_once(void *foo)
384 {
385 	struct inode *inode = (struct inode *) foo;
386 
387 	inode_init_once(inode);
388 }
389 
390 /*
391  * inode->i_lock must be held
392  */
393 void __iget(struct inode *inode)
394 {
395 	atomic_inc(&inode->i_count);
396 }
397 
398 /*
399  * get additional reference to inode; caller must already hold one.
400  */
401 void ihold(struct inode *inode)
402 {
403 	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
404 }
405 EXPORT_SYMBOL(ihold);
406 
407 static void inode_lru_list_add(struct inode *inode)
408 {
409 	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
410 		this_cpu_inc(nr_unused);
411 	else
412 		inode->i_state |= I_REFERENCED;
413 }
414 
415 /*
416  * Add inode to LRU if needed (inode is unused and clean).
417  *
418  * Needs inode->i_lock held.
419  */
420 void inode_add_lru(struct inode *inode)
421 {
422 	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
423 				I_FREEING | I_WILL_FREE)) &&
424 	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
425 		inode_lru_list_add(inode);
426 }
427 
428 
429 static void inode_lru_list_del(struct inode *inode)
430 {
431 
432 	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
433 		this_cpu_dec(nr_unused);
434 }
435 
436 /**
437  * inode_sb_list_add - add inode to the superblock list of inodes
438  * @inode: inode to add
439  */
440 void inode_sb_list_add(struct inode *inode)
441 {
442 	spin_lock(&inode->i_sb->s_inode_list_lock);
443 	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
444 	spin_unlock(&inode->i_sb->s_inode_list_lock);
445 }
446 EXPORT_SYMBOL_GPL(inode_sb_list_add);
447 
448 static inline void inode_sb_list_del(struct inode *inode)
449 {
450 	if (!list_empty(&inode->i_sb_list)) {
451 		spin_lock(&inode->i_sb->s_inode_list_lock);
452 		list_del_init(&inode->i_sb_list);
453 		spin_unlock(&inode->i_sb->s_inode_list_lock);
454 	}
455 }
456 
457 static unsigned long hash(struct super_block *sb, unsigned long hashval)
458 {
459 	unsigned long tmp;
460 
461 	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
462 			L1_CACHE_BYTES;
463 	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
464 	return tmp & i_hash_mask;
465 }
466 
467 /**
468  *	__insert_inode_hash - hash an inode
469  *	@inode: unhashed inode
470  *	@hashval: unsigned long value used to locate this object in the
471  *		inode_hashtable.
472  *
473  *	Add an inode to the inode hash for this superblock.
474  */
475 void __insert_inode_hash(struct inode *inode, unsigned long hashval)
476 {
477 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
478 
479 	spin_lock(&inode_hash_lock);
480 	spin_lock(&inode->i_lock);
481 	hlist_add_head(&inode->i_hash, b);
482 	spin_unlock(&inode->i_lock);
483 	spin_unlock(&inode_hash_lock);
484 }
485 EXPORT_SYMBOL(__insert_inode_hash);
486 
487 /**
488  *	__remove_inode_hash - remove an inode from the hash
489  *	@inode: inode to unhash
490  *
491  *	Remove an inode from the inode hash.
492  */
493 void __remove_inode_hash(struct inode *inode)
494 {
495 	spin_lock(&inode_hash_lock);
496 	spin_lock(&inode->i_lock);
497 	hlist_del_init(&inode->i_hash);
498 	spin_unlock(&inode->i_lock);
499 	spin_unlock(&inode_hash_lock);
500 }
501 EXPORT_SYMBOL(__remove_inode_hash);
502 
503 void clear_inode(struct inode *inode)
504 {
505 	/*
506 	 * We have to cycle the i_pages lock here because reclaim can be in the
507 	 * process of removing the last page (in __delete_from_page_cache())
508 	 * and we must not free the mapping under it.
509 	 */
510 	xa_lock_irq(&inode->i_data.i_pages);
511 	BUG_ON(inode->i_data.nrpages);
512 	BUG_ON(inode->i_data.nrexceptional);
513 	xa_unlock_irq(&inode->i_data.i_pages);
514 	BUG_ON(!list_empty(&inode->i_data.private_list));
515 	BUG_ON(!(inode->i_state & I_FREEING));
516 	BUG_ON(inode->i_state & I_CLEAR);
517 	BUG_ON(!list_empty(&inode->i_wb_list));
518 	/* don't need i_lock here, no concurrent mods to i_state */
519 	inode->i_state = I_FREEING | I_CLEAR;
520 }
521 EXPORT_SYMBOL(clear_inode);
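
/*
 * Illustrative sketch (not from the original file): a filesystem's
 * ->evict_inode is expected to end with the same truncate + clear_inode
 * pair that the default path in evict() below uses.  foo_free_blocks()
 * is hypothetical:
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink && !is_bad_inode(inode))
 *			foo_free_blocks(inode);
 *		clear_inode(inode);
 *	}
 */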
522 
523 /*
524  * Free the inode passed in, removing it from the lists it is still connected
525  * to. We remove any pages still attached to the inode and wait for any IO that
526  * is still in progress before finally destroying the inode.
527  *
528  * An inode must already be marked I_FREEING so that we avoid the inode being
529  * moved back onto lists if we race with other code that manipulates the lists
530  * (e.g. writeback_single_inode). The caller is responsible for setting this.
531  *
532  * An inode must already be removed from the LRU list before being evicted from
533  * the cache. This should occur atomically with setting the I_FREEING state
534  * flag, so no inodes here should ever be on the LRU when being evicted.
535  */
536 static void evict(struct inode *inode)
537 {
538 	const struct super_operations *op = inode->i_sb->s_op;
539 
540 	BUG_ON(!(inode->i_state & I_FREEING));
541 	BUG_ON(!list_empty(&inode->i_lru));
542 
543 	if (!list_empty(&inode->i_io_list))
544 		inode_io_list_del(inode);
545 
546 	inode_sb_list_del(inode);
547 
548 	/*
549 	 * Wait for flusher thread to be done with the inode so that filesystem
550 	 * does not start destroying it while writeback is still running. Since
551 	 * the inode has I_FREEING set, flusher thread won't start new work on
552 	 * the inode.  We just have to wait for running writeback to finish.
553 	 */
554 	inode_wait_for_writeback(inode);
555 
556 	if (op->evict_inode) {
557 		op->evict_inode(inode);
558 	} else {
559 		truncate_inode_pages_final(&inode->i_data);
560 		clear_inode(inode);
561 	}
562 	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
563 		bd_forget(inode);
564 	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
565 		cd_forget(inode);
566 
567 	remove_inode_hash(inode);
568 
569 	spin_lock(&inode->i_lock);
570 	wake_up_bit(&inode->i_state, __I_NEW);
571 	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
572 	spin_unlock(&inode->i_lock);
573 
574 	destroy_inode(inode);
575 }
576 
577 /*
578  * dispose_list - dispose of the contents of a local list
579  * @head: the head of the list to free
580  *
581  * Dispose-list gets a local list with local inodes in it, so it doesn't
582  * need to worry about list corruption and SMP locks.
583  */
584 static void dispose_list(struct list_head *head)
585 {
586 	while (!list_empty(head)) {
587 		struct inode *inode;
588 
589 		inode = list_first_entry(head, struct inode, i_lru);
590 		list_del_init(&inode->i_lru);
591 
592 		evict(inode);
593 		cond_resched();
594 	}
595 }
596 
597 /**
598  * evict_inodes	- evict all evictable inodes for a superblock
599  * @sb:		superblock to operate on
600  *
601  * Make sure that no inodes with zero refcount are retained.  This is
602  * called by superblock shutdown after having SB_ACTIVE flag removed,
603  * so any inode reaching zero refcount during or after that call will
604  * be immediately evicted.
605  */
606 void evict_inodes(struct super_block *sb)
607 {
608 	struct inode *inode, *next;
609 	LIST_HEAD(dispose);
610 
611 again:
612 	spin_lock(&sb->s_inode_list_lock);
613 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
614 		if (atomic_read(&inode->i_count))
615 			continue;
616 
617 		spin_lock(&inode->i_lock);
618 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
619 			spin_unlock(&inode->i_lock);
620 			continue;
621 		}
622 
623 		inode->i_state |= I_FREEING;
624 		inode_lru_list_del(inode);
625 		spin_unlock(&inode->i_lock);
626 		list_add(&inode->i_lru, &dispose);
627 
628 		/*
629 		 * We can have a ton of inodes to evict at unmount time given
630 		 * enough memory, check to see if we need to go to sleep for a
631 		 * bit so we don't livelock.
632 		 */
633 		if (need_resched()) {
634 			spin_unlock(&sb->s_inode_list_lock);
635 			cond_resched();
636 			dispose_list(&dispose);
637 			goto again;
638 		}
639 	}
640 	spin_unlock(&sb->s_inode_list_lock);
641 
642 	dispose_list(&dispose);
643 }
644 EXPORT_SYMBOL_GPL(evict_inodes);
645 
646 /**
647  * invalidate_inodes	- attempt to free all inodes on a superblock
648  * @sb:		superblock to operate on
649  * @kill_dirty: flag to guide handling of dirty inodes
650  *
651  * Attempts to free all inodes for a given superblock.  If there were any
652  * busy inodes return a non-zero value, else zero.
653  * If @kill_dirty is set, discard dirty inodes too, otherwise treat
654  * them as busy.
655  */
656 int invalidate_inodes(struct super_block *sb, bool kill_dirty)
657 {
658 	int busy = 0;
659 	struct inode *inode, *next;
660 	LIST_HEAD(dispose);
661 
662 	spin_lock(&sb->s_inode_list_lock);
663 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
664 		spin_lock(&inode->i_lock);
665 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
666 			spin_unlock(&inode->i_lock);
667 			continue;
668 		}
669 		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
670 			spin_unlock(&inode->i_lock);
671 			busy = 1;
672 			continue;
673 		}
674 		if (atomic_read(&inode->i_count)) {
675 			spin_unlock(&inode->i_lock);
676 			busy = 1;
677 			continue;
678 		}
679 
680 		inode->i_state |= I_FREEING;
681 		inode_lru_list_del(inode);
682 		spin_unlock(&inode->i_lock);
683 		list_add(&inode->i_lru, &dispose);
684 	}
685 	spin_unlock(&sb->s_inode_list_lock);
686 
687 	dispose_list(&dispose);
688 
689 	return busy;
690 }
691 
692 /*
693  * Isolate the inode from the LRU in preparation for freeing it.
694  *
695  * Any inodes which are pinned purely because of attached pagecache have their
696  * pagecache removed.  If the inode has metadata buffers attached to
697  * mapping->private_list then try to remove them.
698  *
699  * If the inode has the I_REFERENCED flag set, then it means that it has been
700  * used recently - the flag is set in iput_final(). When we encounter such an
701  * inode, clear the flag and move it to the back of the LRU so it gets another
702  * pass through the LRU before it gets reclaimed. This is necessary because
703  * we are doing lazy LRU updates to minimise lock contention, so the
704  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
705  * with this flag set because they are the inodes that are out of order.
706  */
707 static enum lru_status inode_lru_isolate(struct list_head *item,
708 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
709 {
710 	struct list_head *freeable = arg;
711 	struct inode	*inode = container_of(item, struct inode, i_lru);
712 
713 	/*
714 	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
715 	 * If we fail to get the lock, just skip it.
716 	 */
717 	if (!spin_trylock(&inode->i_lock))
718 		return LRU_SKIP;
719 
720 	/*
721 	 * Referenced or dirty inodes are still in use. Give them another pass
722  * through the LRU as we cannot reclaim them now.
723 	 */
724 	if (atomic_read(&inode->i_count) ||
725 	    (inode->i_state & ~I_REFERENCED)) {
726 		list_lru_isolate(lru, &inode->i_lru);
727 		spin_unlock(&inode->i_lock);
728 		this_cpu_dec(nr_unused);
729 		return LRU_REMOVED;
730 	}
731 
732 	/* recently referenced inodes get one more pass */
733 	if (inode->i_state & I_REFERENCED) {
734 		inode->i_state &= ~I_REFERENCED;
735 		spin_unlock(&inode->i_lock);
736 		return LRU_ROTATE;
737 	}
738 
739 	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
740 		__iget(inode);
741 		spin_unlock(&inode->i_lock);
742 		spin_unlock(lru_lock);
743 		if (remove_inode_buffers(inode)) {
744 			unsigned long reap;
745 			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
746 			if (current_is_kswapd())
747 				__count_vm_events(KSWAPD_INODESTEAL, reap);
748 			else
749 				__count_vm_events(PGINODESTEAL, reap);
750 			if (current->reclaim_state)
751 				current->reclaim_state->reclaimed_slab += reap;
752 		}
753 		iput(inode);
754 		spin_lock(lru_lock);
755 		return LRU_RETRY;
756 	}
757 
758 	WARN_ON(inode->i_state & I_NEW);
759 	inode->i_state |= I_FREEING;
760 	list_lru_isolate_move(lru, &inode->i_lru, freeable);
761 	spin_unlock(&inode->i_lock);
762 
763 	this_cpu_dec(nr_unused);
764 	return LRU_REMOVED;
765 }
766 
767 /*
768  * Walk the superblock inode LRU for freeable inodes and attempt to free them.
769  * This is called from the superblock shrinker function with a number of inodes
770  * to trim from the LRU. Inodes to be freed are moved to a temporary list and
771  * then are freed outside inode_lock by dispose_list().
772  */
773 long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
774 {
775 	LIST_HEAD(freeable);
776 	long freed;
777 
778 	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
779 				     inode_lru_isolate, &freeable);
780 	dispose_list(&freeable);
781 	return freed;
782 }
783 
784 static void __wait_on_freeing_inode(struct inode *inode);
785 /*
786  * Called with the inode lock held.
787  */
788 static struct inode *find_inode(struct super_block *sb,
789 				struct hlist_head *head,
790 				int (*test)(struct inode *, void *),
791 				void *data)
792 {
793 	struct inode *inode = NULL;
794 
795 repeat:
796 	hlist_for_each_entry(inode, head, i_hash) {
797 		if (inode->i_sb != sb)
798 			continue;
799 		if (!test(inode, data))
800 			continue;
801 		spin_lock(&inode->i_lock);
802 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
803 			__wait_on_freeing_inode(inode);
804 			goto repeat;
805 		}
806 		__iget(inode);
807 		spin_unlock(&inode->i_lock);
808 		return inode;
809 	}
810 	return NULL;
811 }
812 
813 /*
814  * find_inode_fast is the fast path version of find_inode, see the comment at
815  * iget_locked for details.
816  */
817 static struct inode *find_inode_fast(struct super_block *sb,
818 				struct hlist_head *head, unsigned long ino)
819 {
820 	struct inode *inode = NULL;
821 
822 repeat:
823 	hlist_for_each_entry(inode, head, i_hash) {
824 		if (inode->i_ino != ino)
825 			continue;
826 		if (inode->i_sb != sb)
827 			continue;
828 		spin_lock(&inode->i_lock);
829 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
830 			__wait_on_freeing_inode(inode);
831 			goto repeat;
832 		}
833 		__iget(inode);
834 		spin_unlock(&inode->i_lock);
835 		return inode;
836 	}
837 	return NULL;
838 }
839 
840 /*
841  * Each cpu owns a range of LAST_INO_BATCH numbers.
842  * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
843  * to renew the exhausted range.
844  *
845  * This does not significantly increase overflow rate because every CPU can
846  * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
847  * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
848  * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
849  * overflow rate by 2x, which does not seem too significant.
850  *
851  * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
852  * error if st_ino won't fit in target struct field. Use 32bit counter
853  * here to attempt to avoid that.
854  */
855 #define LAST_INO_BATCH 1024
856 static DEFINE_PER_CPU(unsigned int, last_ino);
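
/*
 * Worked example (illustrative, not from the original file): with
 * LAST_INO_BATCH == 1024, a CPU whose last handed-out number was a
 * multiple of 1024 grabs a fresh batch.  If atomic_add_return() yields
 * 4096, that CPU owns 3073..4096 and hands the numbers out one at a
 * time via the increment below.
 */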
857 
858 unsigned int get_next_ino(void)
859 {
860 	unsigned int *p = &get_cpu_var(last_ino);
861 	unsigned int res = *p;
862 
863 #ifdef CONFIG_SMP
864 	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
865 		static atomic_t shared_last_ino;
866 		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
867 
868 		res = next - LAST_INO_BATCH;
869 	}
870 #endif
871 
872 	res++;
873 	/* get_next_ino should not provide a 0 inode number */
874 	if (unlikely(!res))
875 		res++;
876 	*p = res;
877 	put_cpu_var(last_ino);
878 	return res;
879 }
880 EXPORT_SYMBOL(get_next_ino);
881 
882 /**
883  *	new_inode_pseudo 	- obtain an inode
884  *	@sb: superblock
885  *
886  *	Allocates a new inode for given superblock.
887  *	The inode won't be chained into the superblock's s_inodes list.
888  *	This means:
889  *	- the fs can't be unmounted
890  *	- quotas, fsnotify and writeback can't work on it
891  */
892 struct inode *new_inode_pseudo(struct super_block *sb)
893 {
894 	struct inode *inode = alloc_inode(sb);
895 
896 	if (inode) {
897 		spin_lock(&inode->i_lock);
898 		inode->i_state = 0;
899 		spin_unlock(&inode->i_lock);
900 		INIT_LIST_HEAD(&inode->i_sb_list);
901 	}
902 	return inode;
903 }
904 
905 /**
906  *	new_inode 	- obtain an inode
907  *	@sb: superblock
908  *
909  *	Allocates a new inode for given superblock. The default gfp_mask
910  *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
911  *	If HIGHMEM pages are unsuitable or it is known that pages allocated
912  *	for the page cache are not reclaimable or migratable,
913  *	mapping_set_gfp_mask() must be called with suitable flags on the
914  *	newly created inode's mapping.
915  *
916  */
917 struct inode *new_inode(struct super_block *sb)
918 {
919 	struct inode *inode;
920 
921 	spin_lock_prefetch(&sb->s_inode_list_lock);
922 
923 	inode = new_inode_pseudo(sb);
924 	if (inode)
925 		inode_sb_list_add(inode);
926 	return inode;
927 }
928 EXPORT_SYMBOL(new_inode);
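
/*
 * Illustrative sketch (not from the original file): a pseudo filesystem
 * with no stable on-disk inode numbers typically combines new_inode()
 * with get_next_ino():
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (inode) {
 *		inode->i_ino = get_next_ino();
 *		inode->i_atime = inode->i_mtime = inode->i_ctime =
 *			current_time(inode);
 *		...
 *	}
 */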
929 
930 #ifdef CONFIG_DEBUG_LOCK_ALLOC
931 void lockdep_annotate_inode_mutex_key(struct inode *inode)
932 {
933 	if (S_ISDIR(inode->i_mode)) {
934 		struct file_system_type *type = inode->i_sb->s_type;
935 
936 		/* Set new key only if filesystem hasn't already changed it */
937 		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
938 			/*
939 			 * ensure nobody is actually holding i_mutex
940 			 */
941 			// mutex_destroy(&inode->i_mutex);
942 			init_rwsem(&inode->i_rwsem);
943 			lockdep_set_class(&inode->i_rwsem,
944 					  &type->i_mutex_dir_key);
945 		}
946 	}
947 }
948 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
949 #endif
950 
951 /**
952  * unlock_new_inode - clear the I_NEW state and wake up any waiters
953  * @inode:	new inode to unlock
954  *
955  * Called when the inode is fully initialised to clear the new state of the
956  * inode and wake up anyone waiting for the inode to finish initialisation.
957  */
958 void unlock_new_inode(struct inode *inode)
959 {
960 	lockdep_annotate_inode_mutex_key(inode);
961 	spin_lock(&inode->i_lock);
962 	WARN_ON(!(inode->i_state & I_NEW));
963 	inode->i_state &= ~I_NEW;
964 	smp_mb();
965 	wake_up_bit(&inode->i_state, __I_NEW);
966 	spin_unlock(&inode->i_lock);
967 }
968 EXPORT_SYMBOL(unlock_new_inode);
969 
970 /**
971  * lock_two_nondirectories - take two i_mutexes on non-directory objects
972  *
973  * Lock any non-NULL argument that is not a directory.
974  * Zero, one or two objects may be locked by this function.
975  *
976  * @inode1: first inode to lock
977  * @inode2: second inode to lock
978  */
979 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
980 {
981 	if (inode1 > inode2)
982 		swap(inode1, inode2);
983 
984 	if (inode1 && !S_ISDIR(inode1->i_mode))
985 		inode_lock(inode1);
986 	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
987 		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
988 }
989 EXPORT_SYMBOL(lock_two_nondirectories);
990 
991 /**
992  * unlock_two_nondirectories - release locks from lock_two_nondirectories()
993  * @inode1: first inode to unlock
994  * @inode2: second inode to unlock
995  */
996 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
997 {
998 	if (inode1 && !S_ISDIR(inode1->i_mode))
999 		inode_unlock(inode1);
1000 	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1001 		inode_unlock(inode2);
1002 }
1003 EXPORT_SYMBOL(unlock_two_nondirectories);
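
/*
 * Illustrative sketch (not from the original file): typical paired use
 * around an operation spanning two regular files.  do_foo_op() is a
 * hypothetical helper:
 *
 *	lock_two_nondirectories(src, dst);
 *	err = do_foo_op(src, dst);
 *	unlock_two_nondirectories(src, dst);
 */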
1004 
1005 /**
1006  * iget5_locked - obtain an inode from a mounted file system
1007  * @sb:		super block of file system
1008  * @hashval:	hash value (usually inode number) to get
1009  * @test:	callback used for comparisons between inodes
1010  * @set:	callback used to initialize a new struct inode
1011  * @data:	opaque data pointer to pass to @test and @set
1012  *
1013  * Search for the inode specified by @hashval and @data in the inode cache,
1014  * and if present return it with an increased reference count. This is
1015  * a generalized version of iget_locked() for file systems where the inode
1016  * number is not sufficient for unique identification of an inode.
1017  *
1018  * If the inode is not in cache, allocate a new inode and return it locked,
1019  * hashed, and with the I_NEW flag set. The file system gets to fill it in
1020  * before unlocking it via unlock_new_inode().
1021  *
1022  * Note both @test and @set are called with the inode_hash_lock held, so can't
1023  * sleep.
1024  */
1025 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1026 		int (*test)(struct inode *, void *),
1027 		int (*set)(struct inode *, void *), void *data)
1028 {
1029 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1030 	struct inode *inode;
1031 again:
1032 	spin_lock(&inode_hash_lock);
1033 	inode = find_inode(sb, head, test, data);
1034 	spin_unlock(&inode_hash_lock);
1035 
1036 	if (inode) {
1037 		wait_on_inode(inode);
1038 		if (unlikely(inode_unhashed(inode))) {
1039 			iput(inode);
1040 			goto again;
1041 		}
1042 		return inode;
1043 	}
1044 
1045 	inode = alloc_inode(sb);
1046 	if (inode) {
1047 		struct inode *old;
1048 
1049 		spin_lock(&inode_hash_lock);
1050 		/* We released the lock, so.. */
1051 		old = find_inode(sb, head, test, data);
1052 		if (!old) {
1053 			if (set(inode, data))
1054 				goto set_failed;
1055 
1056 			spin_lock(&inode->i_lock);
1057 			inode->i_state = I_NEW;
1058 			hlist_add_head(&inode->i_hash, head);
1059 			spin_unlock(&inode->i_lock);
1060 			inode_sb_list_add(inode);
1061 			spin_unlock(&inode_hash_lock);
1062 
1063 			/* Return the locked inode with I_NEW set, the
1064 			 * caller is responsible for filling in the contents
1065 			 */
1066 			return inode;
1067 		}
1068 
1069 		/*
1070 		 * Uhhuh, somebody else created the same inode under
1071 		 * us. Use the old inode instead of the one we just
1072 		 * allocated.
1073 		 */
1074 		spin_unlock(&inode_hash_lock);
1075 		destroy_inode(inode);
1076 		inode = old;
1077 		wait_on_inode(inode);
1078 		if (unlikely(inode_unhashed(inode))) {
1079 			iput(inode);
1080 			goto again;
1081 		}
1082 	}
1083 	return inode;
1084 
1085 set_failed:
1086 	spin_unlock(&inode_hash_lock);
1087 	destroy_inode(inode);
1088 	return NULL;
1089 }
1090 EXPORT_SYMBOL(iget5_locked);
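
/*
 * Illustrative sketch (not from the original file): a filesystem whose
 * inodes are not uniquely identified by i_ino alone might use
 * iget5_locked() as below.  foo_test(), foo_set(), FOO_I() and struct
 * foo_key are hypothetical; remember that neither callback may sleep,
 * as both run under inode_hash_lock:
 *
 *	static int foo_test(struct inode *inode, void *data)
 *	{
 *		return FOO_I(inode)->key == ((struct foo_key *)data)->key;
 *	}
 *
 *	static int foo_set(struct inode *inode, void *data)
 *	{
 *		FOO_I(inode)->key = ((struct foo_key *)data)->key;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hashval, foo_test, foo_set, &key);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... fill in the inode ...
 *		unlock_new_inode(inode);
 *	}
 */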
1091 
1092 /**
1093  * iget_locked - obtain an inode from a mounted file system
1094  * @sb:		super block of file system
1095  * @ino:	inode number to get
1096  *
1097  * Search for the inode specified by @ino in the inode cache and if present
1098  * return it with an increased reference count. This is for file systems
1099  * where the inode number is sufficient for unique identification of an inode.
1100  *
1101  * If the inode is not in cache, allocate a new inode and return it locked,
1102  * hashed, and with the I_NEW flag set.  The file system gets to fill it in
1103  * before unlocking it via unlock_new_inode().
1104  */
1105 struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1106 {
1107 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1108 	struct inode *inode;
1109 again:
1110 	spin_lock(&inode_hash_lock);
1111 	inode = find_inode_fast(sb, head, ino);
1112 	spin_unlock(&inode_hash_lock);
1113 	if (inode) {
1114 		wait_on_inode(inode);
1115 		if (unlikely(inode_unhashed(inode))) {
1116 			iput(inode);
1117 			goto again;
1118 		}
1119 		return inode;
1120 	}
1121 
1122 	inode = alloc_inode(sb);
1123 	if (inode) {
1124 		struct inode *old;
1125 
1126 		spin_lock(&inode_hash_lock);
1127 		/* We released the lock, so.. */
1128 		old = find_inode_fast(sb, head, ino);
1129 		if (!old) {
1130 			inode->i_ino = ino;
1131 			spin_lock(&inode->i_lock);
1132 			inode->i_state = I_NEW;
1133 			hlist_add_head(&inode->i_hash, head);
1134 			spin_unlock(&inode->i_lock);
1135 			inode_sb_list_add(inode);
1136 			spin_unlock(&inode_hash_lock);
1137 
1138 			/* Return the locked inode with I_NEW set, the
1139 			 * caller is responsible for filling in the contents
1140 			 */
1141 			return inode;
1142 		}
1143 
1144 		/*
1145 		 * Uhhuh, somebody else created the same inode under
1146 		 * us. Use the old inode instead of the one we just
1147 		 * allocated.
1148 		 */
1149 		spin_unlock(&inode_hash_lock);
1150 		destroy_inode(inode);
1151 		inode = old;
1152 		wait_on_inode(inode);
1153 		if (unlikely(inode_unhashed(inode))) {
1154 			iput(inode);
1155 			goto again;
1156 		}
1157 	}
1158 	return inode;
1159 }
1160 EXPORT_SYMBOL(iget_locked);
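
/*
 * Illustrative sketch (not from the original file): the canonical lookup
 * pattern built on iget_locked().  foo_read_inode() is a hypothetical
 * helper that fills the inode from disk:
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;
 *	foo_read_inode(inode);
 *	unlock_new_inode(inode);
 *	return inode;
 */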
1161 
1162 /*
1163  * search the inode cache for a matching inode number.
1164  * If we find one, then the inode number we are trying to
1165  * allocate is not unique and so we should not use it.
1166  *
1167  * Returns 1 if the inode number is unique, 0 if it is not.
1168  */
1169 static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1170 {
1171 	struct hlist_head *b = inode_hashtable + hash(sb, ino);
1172 	struct inode *inode;
1173 
1174 	spin_lock(&inode_hash_lock);
1175 	hlist_for_each_entry(inode, b, i_hash) {
1176 		if (inode->i_ino == ino && inode->i_sb == sb) {
1177 			spin_unlock(&inode_hash_lock);
1178 			return 0;
1179 		}
1180 	}
1181 	spin_unlock(&inode_hash_lock);
1182 
1183 	return 1;
1184 }
1185 
1186 /**
1187  *	iunique - get a unique inode number
1188  *	@sb: superblock
1189  *	@max_reserved: highest reserved inode number
1190  *
1191  *	Obtain an inode number that is unique on the system for a given
1192  *	superblock. This is used by file systems that have no natural
1193  *	permanent inode numbering system. An inode number is returned that
1194  *	is higher than the reserved limit but unique.
1195  *
1196  *	BUGS:
1197  *	With a large number of inodes live on the file system this function
1198  *	currently becomes quite slow.
1199  */
1200 ino_t iunique(struct super_block *sb, ino_t max_reserved)
1201 {
1202 	/*
1203 	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1204 	 * error if st_ino won't fit in target struct field. Use 32bit counter
1205 	 * here to attempt to avoid that.
1206 	 */
1207 	static DEFINE_SPINLOCK(iunique_lock);
1208 	static unsigned int counter;
1209 	ino_t res;
1210 
1211 	spin_lock(&iunique_lock);
1212 	do {
1213 		if (counter <= max_reserved)
1214 			counter = max_reserved + 1;
1215 		res = counter++;
1216 	} while (!test_inode_iunique(sb, res));
1217 	spin_unlock(&iunique_lock);
1218 
1219 	return res;
1220 }
1221 EXPORT_SYMBOL(iunique);
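
/*
 * Illustrative sketch (not from the original file): a filesystem
 * synthesising inode numbers at lookup time, with FOO_ROOT_INO as a
 * hypothetical reserved number:
 *
 *	inode->i_ino = iunique(sb, FOO_ROOT_INO);
 */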
1222 
1223 struct inode *igrab(struct inode *inode)
1224 {
1225 	spin_lock(&inode->i_lock);
1226 	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1227 		__iget(inode);
1228 		spin_unlock(&inode->i_lock);
1229 	} else {
1230 		spin_unlock(&inode->i_lock);
1231 		/*
1232  * Handle the case where s_op->clear_inode has not been
1233 		 * called yet, and somebody is calling igrab
1234 		 * while the inode is getting freed.
1235 		 */
1236 		inode = NULL;
1237 	}
1238 	return inode;
1239 }
1240 EXPORT_SYMBOL(igrab);
1241 
1242 /**
1243  * ilookup5_nowait - search for an inode in the inode cache
1244  * @sb:		super block of file system to search
1245  * @hashval:	hash value (usually inode number) to search for
1246  * @test:	callback used for comparisons between inodes
1247  * @data:	opaque data pointer to pass to @test
1248  *
1249  * Search for the inode specified by @hashval and @data in the inode cache.
1250  * If the inode is in the cache, the inode is returned with an incremented
1251  * reference count.
1252  *
1253  * Note: I_NEW is not waited upon so you have to be very careful what you do
1254  * with the returned inode.  You probably should be using ilookup5() instead.
1255  *
1256  * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1257  */
1258 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1259 		int (*test)(struct inode *, void *), void *data)
1260 {
1261 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1262 	struct inode *inode;
1263 
1264 	spin_lock(&inode_hash_lock);
1265 	inode = find_inode(sb, head, test, data);
1266 	spin_unlock(&inode_hash_lock);
1267 
1268 	return inode;
1269 }
1270 EXPORT_SYMBOL(ilookup5_nowait);
1271 
1272 /**
1273  * ilookup5 - search for an inode in the inode cache
1274  * @sb:		super block of file system to search
1275  * @hashval:	hash value (usually inode number) to search for
1276  * @test:	callback used for comparisons between inodes
1277  * @data:	opaque data pointer to pass to @test
1278  *
1279  * Search for the inode specified by @hashval and @data in the inode cache,
1280  * and if the inode is in the cache, return the inode with an incremented
1281  * reference count.  Waits on I_NEW before returning the inode.
1283  *
1284  * This is a generalized version of ilookup() for file systems where the
1285  * inode number is not sufficient for unique identification of an inode.
1286  *
1287  * Note: @test is called with the inode_hash_lock held, so can't sleep.
1288  */
1289 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1290 		int (*test)(struct inode *, void *), void *data)
1291 {
1292 	struct inode *inode;
1293 again:
1294 	inode = ilookup5_nowait(sb, hashval, test, data);
1295 	if (inode) {
1296 		wait_on_inode(inode);
1297 		if (unlikely(inode_unhashed(inode))) {
1298 			iput(inode);
1299 			goto again;
1300 		}
1301 	}
1302 	return inode;
1303 }
1304 EXPORT_SYMBOL(ilookup5);
1305 
1306 /**
1307  * ilookup - search for an inode in the inode cache
1308  * @sb:		super block of file system to search
1309  * @ino:	inode number to search for
1310  *
1311  * Search for the inode @ino in the inode cache, and if the inode is in the
1312  * cache, the inode is returned with an incremented reference count.
1313  */
1314 struct inode *ilookup(struct super_block *sb, unsigned long ino)
1315 {
1316 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1317 	struct inode *inode;
1318 again:
1319 	spin_lock(&inode_hash_lock);
1320 	inode = find_inode_fast(sb, head, ino);
1321 	spin_unlock(&inode_hash_lock);
1322 
1323 	if (inode) {
1324 		wait_on_inode(inode);
1325 		if (unlikely(inode_unhashed(inode))) {
1326 			iput(inode);
1327 			goto again;
1328 		}
1329 	}
1330 	return inode;
1331 }
1332 EXPORT_SYMBOL(ilookup);
1333 
1334 /**
1335  * find_inode_nowait - find an inode in the inode cache
1336  * @sb:		super block of file system to search
1337  * @hashval:	hash value (usually inode number) to search for
1338  * @match:	callback used for comparisons between inodes
1339  * @data:	opaque data pointer to pass to @match
1340  *
1341  * Search for the inode specified by @hashval and @data in the inode
1342  * cache, where the helper function @match will return 0 if the inode
1343  * does not match, 1 if the inode does match, and -1 if the search
1344  * should be stopped.  The @match function must be responsible for
1345  * taking the i_lock spin_lock and checking i_state for an inode being
1346  * freed or being initialized, and incrementing the reference count
1347  * before returning 1.  It also must not sleep, since it is called with
1348  * the inode_hash_lock spinlock held.
1349  *
1350  * This is an even more generalized version of ilookup5() when the
1351  * function must never block --- find_inode() can block in
1352  * __wait_on_freeing_inode() --- or when the caller can not increment
1353  * the reference count because the resulting iput() might cause an
1354  * inode eviction.  The tradeoff is that the @match function must be
1355  * very carefully implemented.
1356  */
1357 struct inode *find_inode_nowait(struct super_block *sb,
1358 				unsigned long hashval,
1359 				int (*match)(struct inode *, unsigned long,
1360 					     void *),
1361 				void *data)
1362 {
1363 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1364 	struct inode *inode, *ret_inode = NULL;
1365 	int mval;
1366 
1367 	spin_lock(&inode_hash_lock);
1368 	hlist_for_each_entry(inode, head, i_hash) {
1369 		if (inode->i_sb != sb)
1370 			continue;
1371 		mval = match(inode, hashval, data);
1372 		if (mval == 0)
1373 			continue;
1374 		if (mval == 1)
1375 			ret_inode = inode;
1376 		goto out;
1377 	}
1378 out:
1379 	spin_unlock(&inode_hash_lock);
1380 	return ret_inode;
1381 }
1382 EXPORT_SYMBOL(find_inode_nowait);
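
/*
 * Illustrative sketch (not from the original file): a minimal @match
 * callback honouring the contract above.  FOO_I() and the key layout
 * are hypothetical, and whether to keep scanning or stop when a
 * matching inode is unusable is the caller's policy decision:
 *
 *	static int foo_match(struct inode *inode, unsigned long hashval,
 *			     void *data)
 *	{
 *		if (FOO_I(inode)->key != *(unsigned long *)data)
 *			return 0;
 *		spin_lock(&inode->i_lock);
 *		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
 *			spin_unlock(&inode->i_lock);
 *			return -1;
 *		}
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		return 1;
 *	}
 */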
1383 
1384 int insert_inode_locked(struct inode *inode)
1385 {
1386 	struct super_block *sb = inode->i_sb;
1387 	ino_t ino = inode->i_ino;
1388 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1389 
1390 	while (1) {
1391 		struct inode *old = NULL;
1392 		spin_lock(&inode_hash_lock);
1393 		hlist_for_each_entry(old, head, i_hash) {
1394 			if (old->i_ino != ino)
1395 				continue;
1396 			if (old->i_sb != sb)
1397 				continue;
1398 			spin_lock(&old->i_lock);
1399 			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1400 				spin_unlock(&old->i_lock);
1401 				continue;
1402 			}
1403 			break;
1404 		}
1405 		if (likely(!old)) {
1406 			spin_lock(&inode->i_lock);
1407 			inode->i_state |= I_NEW;
1408 			hlist_add_head(&inode->i_hash, head);
1409 			spin_unlock(&inode->i_lock);
1410 			spin_unlock(&inode_hash_lock);
1411 			return 0;
1412 		}
1413 		__iget(old);
1414 		spin_unlock(&old->i_lock);
1415 		spin_unlock(&inode_hash_lock);
1416 		wait_on_inode(old);
1417 		if (unlikely(!inode_unhashed(old))) {
1418 			iput(old);
1419 			return -EBUSY;
1420 		}
1421 		iput(old);
1422 	}
1423 }
1424 EXPORT_SYMBOL(insert_inode_locked);
1425 
1426 int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1427 		int (*test)(struct inode *, void *), void *data)
1428 {
1429 	struct super_block *sb = inode->i_sb;
1430 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1431 
1432 	while (1) {
1433 		struct inode *old = NULL;
1434 
1435 		spin_lock(&inode_hash_lock);
1436 		hlist_for_each_entry(old, head, i_hash) {
1437 			if (old->i_sb != sb)
1438 				continue;
1439 			if (!test(old, data))
1440 				continue;
1441 			spin_lock(&old->i_lock);
1442 			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1443 				spin_unlock(&old->i_lock);
1444 				continue;
1445 			}
1446 			break;
1447 		}
1448 		if (likely(!old)) {
1449 			spin_lock(&inode->i_lock);
1450 			inode->i_state |= I_NEW;
1451 			hlist_add_head(&inode->i_hash, head);
1452 			spin_unlock(&inode->i_lock);
1453 			spin_unlock(&inode_hash_lock);
1454 			return 0;
1455 		}
1456 		__iget(old);
1457 		spin_unlock(&old->i_lock);
1458 		spin_unlock(&inode_hash_lock);
1459 		wait_on_inode(old);
1460 		if (unlikely(!inode_unhashed(old))) {
1461 			iput(old);
1462 			return -EBUSY;
1463 		}
1464 		iput(old);
1465 	}
1466 }
1467 EXPORT_SYMBOL(insert_inode_locked4);
1468 
1469 
1470 int generic_delete_inode(struct inode *inode)
1471 {
1472 	return 1;
1473 }
1474 EXPORT_SYMBOL(generic_delete_inode);
1475 
1476 /*
1477  * Called when we're dropping the last reference
1478  * to an inode.
1479  *
1480  * Call the FS "drop_inode()" function, defaulting to
1481  * the legacy UNIX filesystem behaviour.  If it tells
1482  * us to evict inode, do so.  Otherwise, retain inode
1483  * in cache if fs is alive, sync and evict if fs is
1484  * shutting down.
1485  */
1486 static void iput_final(struct inode *inode)
1487 {
1488 	struct super_block *sb = inode->i_sb;
1489 	const struct super_operations *op = inode->i_sb->s_op;
1490 	int drop;
1491 
1492 	WARN_ON(inode->i_state & I_NEW);
1493 
1494 	if (op->drop_inode)
1495 		drop = op->drop_inode(inode);
1496 	else
1497 		drop = generic_drop_inode(inode);
1498 
1499 	if (!drop && (sb->s_flags & SB_ACTIVE)) {
1500 		inode_add_lru(inode);
1501 		spin_unlock(&inode->i_lock);
1502 		return;
1503 	}
1504 
1505 	if (!drop) {
1506 		inode->i_state |= I_WILL_FREE;
1507 		spin_unlock(&inode->i_lock);
1508 		write_inode_now(inode, 1);
1509 		spin_lock(&inode->i_lock);
1510 		WARN_ON(inode->i_state & I_NEW);
1511 		inode->i_state &= ~I_WILL_FREE;
1512 	}
1513 
1514 	inode->i_state |= I_FREEING;
1515 	if (!list_empty(&inode->i_lru))
1516 		inode_lru_list_del(inode);
1517 	spin_unlock(&inode->i_lock);
1518 
1519 	evict(inode);
1520 }
1521 
1522 /**
1523  *	iput	- put an inode
1524  *	@inode: inode to put
1525  *
1526  *	Puts an inode, dropping its usage count. If the inode use count hits
1527  *	zero, the inode is then freed and may also be destroyed.
1528  *
1529  *	Consequently, iput() can sleep.
1530  */
1531 void iput(struct inode *inode)
1532 {
1533 	if (!inode)
1534 		return;
1535 	BUG_ON(inode->i_state & I_CLEAR);
1536 retry:
1537 	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1538 		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1539 			atomic_inc(&inode->i_count);
1540 			spin_unlock(&inode->i_lock);
1541 			trace_writeback_lazytime_iput(inode);
1542 			mark_inode_dirty_sync(inode);
1543 			goto retry;
1544 		}
1545 		iput_final(inode);
1546 	}
1547 }
1548 EXPORT_SYMBOL(iput);
1549 
1550 /**
1551  *	bmap	- find a block number in a file
1552  *	@inode: inode of file
1553  *	@block: block to find
1554  *
1555  *	Returns the block number on the device holding the inode that
1556  *	is the disk block number for the block of the file requested.
1557  *	That is, if asked for block 4 of inode 1, the function will return the
1558  *	disk block relative to the disk start that holds that block of the
1559  *	file.
1560  */
1561 sector_t bmap(struct inode *inode, sector_t block)
1562 {
1563 	sector_t res = 0;
1564 	if (inode->i_mapping->a_ops->bmap)
1565 		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
1566 	return res;
1567 }
1568 EXPORT_SYMBOL(bmap);
1569 
1570 /*
1571  * Update times in overlayed inode from underlying real inode
1572  */
1573 static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
1574 			       bool rcu)
1575 {
1576 	struct dentry *upperdentry;
1577 
1578 	/*
1579 	 * Nothing to do in RCU mode or if not on overlayfs
1580 	 */
1581 	if (rcu || likely(!(dentry->d_flags & DCACHE_OP_REAL)))
1582 		return;
1583 
1584 	upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
1585 
1586 	/*
1587 	 * If file is on lower then we can't update atime, so no worries about
1588 	 * If the file is on the lower layer then we can't update atime, so no
1589 	 * worries about stale mtime/ctime.
1590 	if (upperdentry) {
1591 		struct inode *realinode = d_inode(upperdentry);
1592 
1593 		if ((!timespec_equal(&inode->i_mtime, &realinode->i_mtime) ||
1594 		     !timespec_equal(&inode->i_ctime, &realinode->i_ctime))) {
1595 			inode->i_mtime = realinode->i_mtime;
1596 			inode->i_ctime = realinode->i_ctime;
1597 		}
1598 	}
1599 }
1600 
1601 /*
1602  * With relative atime, only update atime if the previous atime is
1603  * earlier than either the ctime or mtime or if at least a day has
1604  * passed since the last atime update.
1605  */
1606 static int relatime_need_update(const struct path *path, struct inode *inode,
1607 				struct timespec now, bool rcu)
1608 {
1609 
1610 	if (!(path->mnt->mnt_flags & MNT_RELATIME))
1611 		return 1;
1612 
1613 	update_ovl_inode_times(path->dentry, inode, rcu);
1614 	/*
1615 	 * Is mtime younger than atime? If yes, update atime:
1616 	 */
1617 	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1618 		return 1;
1619 	/*
1620 	 * Is ctime younger than atime? If yes, update atime:
1621 	 */
1622 	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1623 		return 1;
1624 
1625 	/*
1626 	 * Is the previous atime value older than a day? If yes,
1627 	 * update atime:
1628 	 */
1629 	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1630 		return 1;
1631 	/*
1632 	 * Good, we can skip the atime update:
1633 	 */
1634 	return 0;
1635 }
1636 
1637 int generic_update_time(struct inode *inode, struct timespec *time, int flags)
1638 {
1639 	int iflags = I_DIRTY_TIME;
1640 	bool dirty = false;
1641 
1642 	if (flags & S_ATIME)
1643 		inode->i_atime = *time;
1644 	if (flags & S_VERSION)
1645 		dirty = inode_maybe_inc_iversion(inode, false);
1646 	if (flags & S_CTIME)
1647 		inode->i_ctime = *time;
1648 	if (flags & S_MTIME)
1649 		inode->i_mtime = *time;
1650 	if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
1651 	    !(inode->i_sb->s_flags & SB_LAZYTIME))
1652 		dirty = true;
1653 
1654 	if (dirty)
1655 		iflags |= I_DIRTY_SYNC;
1656 	__mark_inode_dirty(inode, iflags);
1657 	return 0;
1658 }
1659 EXPORT_SYMBOL(generic_update_time);
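
/*
 * Illustrative sketch (not from the original file): a filesystem may
 * supply its own ->update_time to veto or augment the update and still
 * fall back to the generic helper.  foo_is_frozen_snapshot() is
 * hypothetical:
 *
 *	static int foo_update_time(struct inode *inode, struct timespec *time,
 *				   int flags)
 *	{
 *		if (foo_is_frozen_snapshot(inode))
 *			return -EROFS;
 *		return generic_update_time(inode, time, flags);
 *	}
 */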
1660 
1661 /*
1662  * This does the actual work of updating an inode's time or version.  The
1663  * caller must have called mnt_want_write() before calling this.
1664  */
1665 static int update_time(struct inode *inode, struct timespec *time, int flags)
1666 {
1667 	int (*update_time)(struct inode *, struct timespec *, int);
1668 
1669 	update_time = inode->i_op->update_time ? inode->i_op->update_time :
1670 		generic_update_time;
1671 
1672 	return update_time(inode, time, flags);
1673 }
1674 
1675 /**
1676  *	touch_atime	-	update the access time
1677  *	@path: the &struct path to update
1678  *	@inode: inode to update
1679  *
1680  *	Update the accessed time on an inode and mark it for writeback.
1681  *	This function automatically handles read only file systems and media,
1682  *	as well as the "noatime" flag and inode specific "noatime" markers.
1683  */
1684 bool __atime_needs_update(const struct path *path, struct inode *inode,
1685 			  bool rcu)
1686 {
1687 	struct vfsmount *mnt = path->mnt;
1688 	struct timespec now;
1689 
1690 	if (inode->i_flags & S_NOATIME)
1691 		return false;
1692 
1693 	/* Atime updates will likely cause i_uid and i_gid to be written
1694  * back improperly if their true values are unknown to the vfs.
1695 	 */
1696 	if (HAS_UNMAPPED_ID(inode))
1697 		return false;
1698 
1699 	if (IS_NOATIME(inode))
1700 		return false;
1701 	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1702 		return false;
1703 
1704 	if (mnt->mnt_flags & MNT_NOATIME)
1705 		return false;
1706 	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1707 		return false;
1708 
1709 	now = current_time(inode);
1710 
1711 	if (!relatime_need_update(path, inode, now, rcu))
1712 		return false;
1713 
1714 	if (timespec_equal(&inode->i_atime, &now))
1715 		return false;
1716 
1717 	return true;
1718 }
1719 
1720 void touch_atime(const struct path *path)
1721 {
1722 	struct vfsmount *mnt = path->mnt;
1723 	struct inode *inode = d_inode(path->dentry);
1724 	struct timespec now;
1725 
1726 	if (!__atime_needs_update(path, inode, false))
1727 		return;
1728 
1729 	if (!sb_start_write_trylock(inode->i_sb))
1730 		return;
1731 
1732 	if (__mnt_want_write(mnt) != 0)
1733 		goto skip_update;
1734 	/*
1735 	 * File systems can error out when updating inodes if they need to
1736 	 * allocate new space to modify an inode (such is the case for
1737 	 * Btrfs), but since we touch atime while walking down the path we
1738 	 * really don't care if we failed to update the atime of the file,
1739 	 * so just ignore the return value.
1740 	 * We may also fail on filesystems that have the ability to make parts
1741 	 * of the fs read only, e.g. subvolumes in Btrfs.
1742 	 */
1743 	now = current_time(inode);
1744 	update_time(inode, &now, S_ATIME);
1745 	__mnt_drop_write(mnt);
1746 skip_update:
1747 	sb_end_write(inode->i_sb);
1748 }
1749 EXPORT_SYMBOL(touch_atime);
1750 
1751 /*
1752  * The logic we want is
1753  *
1754  *	if suid or (sgid and xgrp)
1755  *		remove privs
1756  */
1757 int should_remove_suid(struct dentry *dentry)
1758 {
1759 	umode_t mode = d_inode(dentry)->i_mode;
1760 	int kill = 0;
1761 
1762 	/* suid always must be killed */
1763 	if (unlikely(mode & S_ISUID))
1764 		kill = ATTR_KILL_SUID;
1765 
1766 	/*
1767 	 * sgid without any exec bits is just a mandatory locking mark; leave
1768 	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1769 	 */
1770 	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1771 		kill |= ATTR_KILL_SGID;
1772 
1773 	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1774 		return kill;
1775 
1776 	return 0;
1777 }
1778 EXPORT_SYMBOL(should_remove_suid);
1779 
1780 /*
1781  * Return mask of changes for notify_change() that need to be done as a
1782  * response to write or truncate. Return 0 if nothing has to be changed.
1783  * Negative value on error (change should be denied).
1784  */
1785 int dentry_needs_remove_privs(struct dentry *dentry)
1786 {
1787 	struct inode *inode = d_inode(dentry);
1788 	int mask = 0;
1789 	int ret;
1790 
1791 	if (IS_NOSEC(inode))
1792 		return 0;
1793 
1794 	mask = should_remove_suid(dentry);
1795 	ret = security_inode_need_killpriv(dentry);
1796 	if (ret < 0)
1797 		return ret;
1798 	if (ret)
1799 		mask |= ATTR_KILL_PRIV;
1800 	return mask;
1801 }
1802 
1803 static int __remove_privs(struct dentry *dentry, int kill)
1804 {
1805 	struct iattr newattrs;
1806 
1807 	newattrs.ia_valid = ATTR_FORCE | kill;
1808 	/*
1809 	 * Note we call this on write, so notify_change will not
1810 	 * encounter any conflicting delegations:
1811 	 */
1812 	return notify_change(dentry, &newattrs, NULL);
1813 }
1814 
1815 /*
1816  * Remove special file privileges (suid, capabilities) when the file is written
1817  * to or truncated.
1818  */
1819 int file_remove_privs(struct file *file)
1820 {
1821 	struct dentry *dentry = file_dentry(file);
1822 	struct inode *inode = file_inode(file);
1823 	int kill;
1824 	int error = 0;
1825 
1826 	/* Fast path for nothing security related */
1827 	if (IS_NOSEC(inode))
1828 		return 0;
1829 
1830 	kill = dentry_needs_remove_privs(dentry);
1831 	if (kill < 0)
1832 		return kill;
1833 	if (kill)
1834 		error = __remove_privs(dentry, kill);
1835 	if (!error)
1836 		inode_has_no_xattr(inode);
1837 
1838 	return error;
1839 }
1840 EXPORT_SYMBOL(file_remove_privs);
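
/*
 * Typical use in the buffered-write path, as __generic_file_write_iter()
 * does (abridged sketch; error handling trimmed):
 *
 *	err = file_remove_privs(file);
 *	if (!err)
 *		err = file_update_time(file);
 *	if (!err)
 *		... perform the actual write ...
 *
 * Privileges are dropped before the timestamps are touched so that a
 * failed priv removal aborts the write before any data is modified.
 */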
1841 
1842 /**
1843  *	file_update_time	-	update mtime and ctime
1844  *	@file: file accessed
1845  *
1846  *	Update the mtime and ctime members of an inode and mark the inode
1847  *	for writeback.  Note that this function is meant exclusively for
1848  *	usage in the file write path of filesystems, and filesystems may
1849  *	choose to explicitly ignore updates via this function with the
1850  *	S_NOCMTIME inode flag, e.g. for network filesystems where these
1851  *	timestamps are handled by the server.  This can return an error for
1852  *	filesystems that need to allocate space in order to update an inode.
1853  */
1855 int file_update_time(struct file *file)
1856 {
1857 	struct inode *inode = file_inode(file);
1858 	struct timespec now;
1859 	int sync_it = 0;
1860 	int ret;
1861 
1862 	/* First try to exhaust all avenues to not sync */
1863 	if (IS_NOCMTIME(inode))
1864 		return 0;
1865 
1866 	now = current_time(inode);
1867 	if (!timespec_equal(&inode->i_mtime, &now))
1868 		sync_it = S_MTIME;
1869 
1870 	if (!timespec_equal(&inode->i_ctime, &now))
1871 		sync_it |= S_CTIME;
1872 
1873 	if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
1874 		sync_it |= S_VERSION;
1875 
1876 	if (!sync_it)
1877 		return 0;
1878 
1879 	/* Finally allowed to write? Takes lock. */
1880 	if (__mnt_want_write_file(file))
1881 		return 0;
1882 
1883 	ret = update_time(inode, &now, sync_it);
1884 	__mnt_drop_write_file(file);
1885 
1886 	return ret;
1887 }
1888 EXPORT_SYMBOL(file_update_time);
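
/*
 * Filesystems may override the default timestamp update by providing
 * ->update_time() in their inode_operations; the update_time() helper
 * used above then dispatches to it instead of generic_update_time().
 * A skeletal override for a hypothetical filesystem (the examplefs_*
 * name is invented):
 *
 *	static int examplefs_update_time(struct inode *inode,
 *					 struct timespec *now, int flags)
 *	{
 *		if (flags & S_ATIME)
 *			inode->i_atime = *now;
 *		if (flags & S_MTIME)
 *			inode->i_mtime = *now;
 *		if (flags & S_CTIME)
 *			inode->i_ctime = *now;
 *		if (flags & S_VERSION)
 *			inode_inc_iversion(inode);
 *		mark_inode_dirty_sync(inode);
 *		return 0;
 *	}
 */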
1889 
1890 int inode_needs_sync(struct inode *inode)
1891 {
1892 	if (IS_SYNC(inode))
1893 		return 1;
1894 	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
1895 		return 1;
1896 	return 0;
1897 }
1898 EXPORT_SYMBOL(inode_needs_sync);
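
/*
 * The result tells the caller whether an inode it has just modified
 * must reach stable storage before the operation returns: true for
 * synchronous inodes, and for directories on dirsync mounts.  A sketch
 * of the intended use:
 *
 *	... modify the inode ...
 *	if (inode_needs_sync(inode))
 *		err = write_inode_now(inode, 1);
 */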
1899 
1900 /*
1901  * If we try to find an inode in the inode hash while it is being
1902  * deleted, we have to wait until the filesystem completes its
1903  * deletion before reporting that it isn't found.  This function waits
1904  * until the deletion _might_ have completed.  Callers are responsible
1905  * for rechecking the inode state.
1906  *
1907  * It doesn't matter if I_NEW is not set initially, a call to
1908  * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
1909  * will DTRT.
1910  */
1911 static void __wait_on_freeing_inode(struct inode *inode)
1912 {
1913 	wait_queue_head_t *wq;
1914 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
1915 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
1916 	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
1917 	spin_unlock(&inode->i_lock);
1918 	spin_unlock(&inode_hash_lock);
1919 	schedule();
1920 	finish_wait(wq, &wait.wq_entry);
1921 	spin_lock(&inode_hash_lock);
1922 }
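
/*
 * The hash-lookup helpers earlier in this file use it like this
 * (abridged from the find_inode() pattern):
 *
 *	repeat:
 *		hlist_for_each_entry(inode, head, i_hash) {
 *			...
 *			spin_lock(&inode->i_lock);
 *			if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
 *				__wait_on_freeing_inode(inode);
 *				goto repeat;
 *			}
 *			...
 *		}
 *
 * Because the waiter drops both locks before sleeping, it must restart
 * the walk from scratch: the inode may be gone by the time it wakes.
 */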
1923 
1924 static __initdata unsigned long ihash_entries;
1925 static int __init set_ihash_entries(char *str)
1926 {
1927 	if (!str)
1928 		return 0;
1929 	ihash_entries = simple_strtoul(str, &str, 0);
1930 	return 1;
1931 }
1932 __setup("ihash_entries=", set_ihash_entries);
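
/*
 * This allows the inode hash size to be requested on the kernel command
 * line, e.g.:
 *
 *	ihash_entries=131072
 *
 * The value is only a hint: alloc_large_system_hash() below rounds it
 * and applies its own bounds.
 */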
1933 
1934 /*
1935  * Initialize the waitqueues and inode hash table.
1936  */
1937 void __init inode_init_early(void)
1938 {
1939 	/* If hashes are distributed across NUMA nodes, defer
1940 	 * hash allocation until vmalloc space is available.
1941 	 */
1942 	if (hashdist)
1943 		return;
1944 
1945 	inode_hashtable =
1946 		alloc_large_system_hash("Inode-cache",
1947 					sizeof(struct hlist_head),
1948 					ihash_entries,
1949 					14,
1950 					HASH_EARLY | HASH_ZERO,
1951 					&i_hash_shift,
1952 					&i_hash_mask,
1953 					0,
1954 					0);
1955 }
1956 
1957 void __init inode_init(void)
1958 {
1959 	/* inode slab cache */
1960 	inode_cachep = kmem_cache_create("inode_cache",
1961 					 sizeof(struct inode),
1962 					 0,
1963 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1964 					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1965 					 init_once);
1966 
1967 	/* Hash may have been set up in inode_init_early */
1968 	if (!hashdist)
1969 		return;
1970 
1971 	inode_hashtable =
1972 		alloc_large_system_hash("Inode-cache",
1973 					sizeof(struct hlist_head),
1974 					ihash_entries,
1975 					14,
1976 					HASH_ZERO,
1977 					&i_hash_shift,
1978 					&i_hash_mask,
1979 					0,
1980 					0);
1981 }
1982 
1983 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
1984 {
1985 	inode->i_mode = mode;
1986 	if (S_ISCHR(mode)) {
1987 		inode->i_fop = &def_chr_fops;
1988 		inode->i_rdev = rdev;
1989 	} else if (S_ISBLK(mode)) {
1990 		inode->i_fop = &def_blk_fops;
1991 		inode->i_rdev = rdev;
1992 	} else if (S_ISFIFO(mode))
1993 		inode->i_fop = &pipefifo_fops;
1994 	else if (S_ISSOCK(mode))
1995 		;	/* leave it no_open_fops */
1996 	else
1997 		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
1998 				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
1999 				  inode->i_ino);
2000 }
2001 EXPORT_SYMBOL(init_special_inode);
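
/*
 * Typical caller: a filesystem's ->mknod() method, once the inode has
 * been allocated (a sketch; the examplefs_* helpers are invented):
 *
 *	static int examplefs_mknod(struct inode *dir, struct dentry *dentry,
 *				   umode_t mode, dev_t rdev)
 *	{
 *		struct inode *inode = examplefs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		mark_inode_dirty(inode);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */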
2002 
2003 /**
2004  * inode_init_owner - Init uid, gid, mode for new inode according to POSIX standards
2005  * @inode: New inode
2006  * @dir: Directory inode
2007  * @mode: mode of the new inode
2008  */
2009 void inode_init_owner(struct inode *inode, const struct inode *dir,
2010 			umode_t mode)
2011 {
2012 	inode->i_uid = current_fsuid();
2013 	if (dir && dir->i_mode & S_ISGID) {
2014 		inode->i_gid = dir->i_gid;
2015 		if (S_ISDIR(mode))
2016 			mode |= S_ISGID;
2017 	} else
2018 		inode->i_gid = current_fsgid();
2019 	inode->i_mode = mode;
2020 }
2021 EXPORT_SYMBOL(inode_init_owner);
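
/*
 * Typical use in a filesystem's inode-allocation path (sketch):
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	inode_init_owner(inode, dir, mode);
 *	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
 *
 * Passing the parent directory lets the sgid-inheritance rule above take
 * effect; callers with no meaningful parent may pass NULL for @dir.
 */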
2022 
2023 /**
2024  * inode_owner_or_capable - check current task permissions to inode
2025  * @inode: inode being checked
2026  *
2027  * Return true if current either has CAP_FOWNER in a namespace with the
2028  * inode owner uid mapped, or owns the file.
2029  */
2030 bool inode_owner_or_capable(const struct inode *inode)
2031 {
2032 	struct user_namespace *ns;
2033 
2034 	if (uid_eq(current_fsuid(), inode->i_uid))
2035 		return true;
2036 
2037 	ns = current_user_ns();
2038 	if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
2039 		return true;
2040 	return false;
2041 }
2042 EXPORT_SYMBOL(inode_owner_or_capable);
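
/*
 * Typical use: gating an ioctl that alters inode state, in the style of
 * the FS_IOC_SETFLAGS handlers (sketch):
 *
 *	if (!inode_owner_or_capable(inode))
 *		return -EPERM;
 *
 * Note the namespace-aware capability check: CAP_FOWNER only helps if
 * the inode's uid is mapped into the caller's user namespace.
 */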
2043 
2044 /*
2045  * Direct I/O helper functions
2046  */
2047 static void __inode_dio_wait(struct inode *inode)
2048 {
2049 	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2050 	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2051 
2052 	do {
2053 		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2054 		if (atomic_read(&inode->i_dio_count))
2055 			schedule();
2056 	} while (atomic_read(&inode->i_dio_count));
2057 	finish_wait(wq, &q.wq_entry);
2058 }
2059 
2060 /**
2061  * inode_dio_wait - wait for outstanding DIO requests to finish
2062  * @inode: inode to wait for
2063  *
2064  * Waits for all pending direct I/O requests to finish so that we can
2065  * proceed with a truncate or equivalent operation.
2066  *
2067  * Must be called under a lock that serializes taking new references
2068  * to i_dio_count, usually inode->i_mutex.
2069  */
2070 void inode_dio_wait(struct inode *inode)
2071 {
2072 	if (atomic_read(&inode->i_dio_count))
2073 		__inode_dio_wait(inode);
2074 }
2075 EXPORT_SYMBOL(inode_dio_wait);
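
/*
 * Typical call site: a truncate path, after taking the lock that keeps
 * new direct I/O from starting (sketch):
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	... update i_size, free blocks ...
 *	inode_unlock(inode);
 */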
2076 
2077 /*
2078  * inode_set_flags - atomically set some inode flags
2079  *
2080  * Note: the caller should be holding i_mutex, or else be sure that
2081  * they have exclusive access to the inode structure (i.e., while the
2082  * inode is being instantiated).  The cmpxchg() loop wouldn't be
2083  * necessary if all code paths that modify i_flags actually followed
2084  * this rule, but at least one code path doesn't today, so we use
2085  * cmpxchg() out of an abundance of caution.
2087  *
2088  * In the long run, i_mutex is overkill, and we should probably look
2089  * at using the i_lock spinlock to protect i_flags, and then make sure
2090  * it is so documented in include/linux/fs.h and that all code follows
2091  * the locking convention!!
2092  */
2093 void inode_set_flags(struct inode *inode, unsigned int flags,
2094 		     unsigned int mask)
2095 {
2096 	unsigned int old_flags, new_flags;
2097 
2098 	WARN_ON_ONCE(flags & ~mask);
2099 	do {
2100 		old_flags = READ_ONCE(inode->i_flags);
2101 		new_flags = (old_flags & ~mask) | flags;
2102 	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
2103 				  new_flags) != old_flags));
2104 }
2105 EXPORT_SYMBOL(inode_set_flags);
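
/*
 * Example: propagating on-disk flags into i_flags, in the style of
 * ext4_set_inode_flags() (the EXAMPLEFS_*_FL names are invented):
 *
 *	unsigned int new_fl = 0;
 *
 *	if (fs_flags & EXAMPLEFS_SYNC_FL)
 *		new_fl |= S_SYNC;
 *	if (fs_flags & EXAMPLEFS_NOATIME_FL)
 *		new_fl |= S_NOATIME;
 *	inode_set_flags(inode, new_fl, S_SYNC | S_NOATIME);
 *
 * Bits inside the mask but not in @flags are cleared; bits outside the
 * mask are left untouched, so concurrent updates to unrelated flags are
 * not lost.
 */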
2106 
2107 void inode_nohighmem(struct inode *inode)
2108 {
2109 	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2110 }
2111 EXPORT_SYMBOL(inode_nohighmem);
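
/*
 * Typically called when instantiating an inode whose page-cache pages
 * the kernel must be able to address without kmap(), most commonly
 * symlinks served by page_get_link():
 *
 *	inode->i_op = &page_symlink_inode_operations;
 *	inode_nohighmem(inode);
 */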
2112 
2113 /**
2114  * current_time - Return FS time
2115  * @inode: inode.
2116  *
2117  * Return the current time truncated to the time granularity supported by
2118  * the fs.
2119  *
2120  * Note that inode and inode->i_sb must not be NULL.
2121  * Otherwise, the function warns and returns the time without truncation.
2122  */
2123 struct timespec current_time(struct inode *inode)
2124 {
2125 	struct timespec now = current_kernel_time();
2126 
2127 	if (unlikely(!inode->i_sb)) {
2128 		WARN(1, "current_time() called with uninitialized super_block in the inode");
2129 		return now;
2130 	}
2131 
2132 	return timespec_trunc(now, inode->i_sb->s_time_gran);
2133 }
2134 EXPORT_SYMBOL(current_time);
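
/*
 * Worked example: with the alloc_super() default of s_time_gran ==
 * 1000000000 (one-second resolution), a raw time of
 * { .tv_sec = 1000, .tv_nsec = 123456789 } comes back as
 * { .tv_sec = 1000, .tv_nsec = 0 }; a filesystem that sets
 * s_time_gran = 1 gets the full nanosecond value unchanged.
 */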
2135