xref: /openbmc/linux/fs/inode.c (revision 45fe9262)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * (C) 1997 Linus Torvalds
4  * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
5  */
6 #include <linux/export.h>
7 #include <linux/fs.h>
8 #include <linux/mm.h>
9 #include <linux/backing-dev.h>
10 #include <linux/hash.h>
11 #include <linux/swap.h>
12 #include <linux/security.h>
13 #include <linux/cdev.h>
14 #include <linux/memblock.h>
15 #include <linux/fscrypt.h>
16 #include <linux/fsnotify.h>
17 #include <linux/mount.h>
18 #include <linux/posix_acl.h>
19 #include <linux/prefetch.h>
20 #include <linux/buffer_head.h> /* for inode_has_buffers */
21 #include <linux/ratelimit.h>
22 #include <linux/list_lru.h>
23 #include <linux/iversion.h>
24 #include <trace/events/writeback.h>
25 #include "internal.h"
26 
27 /*
28  * Inode locking rules:
29  *
30  * inode->i_lock protects:
31  *   inode->i_state, inode->i_hash, __iget()
32  * Inode LRU list locks protect:
33  *   inode->i_sb->s_inode_lru, inode->i_lru
34  * inode->i_sb->s_inode_list_lock protects:
35  *   inode->i_sb->s_inodes, inode->i_sb_list
36  * bdi->wb.list_lock protects:
37  *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
38  * inode_hash_lock protects:
39  *   inode_hashtable, inode->i_hash
40  *
41  * Lock ordering:
42  *
43  * inode->i_sb->s_inode_list_lock
44  *   inode->i_lock
45  *     Inode LRU list locks
46  *
47  * bdi->wb.list_lock
48  *   inode->i_lock
49  *
50  * inode_hash_lock
51  *   inode->i_sb->s_inode_list_lock
52  *   inode->i_lock
53  *
54  * iunique_lock
55  *   inode_hash_lock
56  */
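
/*
 * Illustrative sketch (not part of this file, function name hypothetical):
 * the documented ordering in practice.  A walker of the per-sb inode list
 * takes s_inode_list_lock first and only then the per-inode i_lock, just
 * like evict_inodes() further down.
 */
static void example_walk_sb_inodes(struct super_block *sb)
{
	struct inode *inode;

	spin_lock(&sb->s_inode_list_lock);		/* outer lock */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);		/* nests inside */
		/* ... inspect inode->i_state here ... */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
}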
57 
58 static unsigned int i_hash_mask __read_mostly;
59 static unsigned int i_hash_shift __read_mostly;
60 static struct hlist_head *inode_hashtable __read_mostly;
61 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
62 
63 /*
64  * Empty aops. Can be used for the cases where the user does not
65  * define any of the address_space operations.
66  */
67 const struct address_space_operations empty_aops = {
68 };
69 EXPORT_SYMBOL(empty_aops);
70 
71 /*
72  * Statistics gathering..
73  */
74 struct inodes_stat_t inodes_stat;
75 
76 static DEFINE_PER_CPU(unsigned long, nr_inodes);
77 static DEFINE_PER_CPU(unsigned long, nr_unused);
78 
79 static struct kmem_cache *inode_cachep __read_mostly;
80 
81 static long get_nr_inodes(void)
82 {
83 	int i;
84 	long sum = 0;
85 	for_each_possible_cpu(i)
86 		sum += per_cpu(nr_inodes, i);
87 	return sum < 0 ? 0 : sum;
88 }
89 
90 static inline long get_nr_inodes_unused(void)
91 {
92 	int i;
93 	long sum = 0;
94 	for_each_possible_cpu(i)
95 		sum += per_cpu(nr_unused, i);
96 	return sum < 0 ? 0 : sum;
97 }
98 
99 long get_nr_dirty_inodes(void)
100 {
101 	/* not actually dirty inodes, but a wild approximation */
102 	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
103 	return nr_dirty > 0 ? nr_dirty : 0;
104 }
105 
106 /*
107  * Handle the nr_inodes sysctl
108  */
109 #ifdef CONFIG_SYSCTL
110 int proc_nr_inodes(struct ctl_table *table, int write,
111 		   void *buffer, size_t *lenp, loff_t *ppos)
112 {
113 	inodes_stat.nr_inodes = get_nr_inodes();
114 	inodes_stat.nr_unused = get_nr_inodes_unused();
115 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
116 }
117 #endif
118 
119 static int no_open(struct inode *inode, struct file *file)
120 {
121 	return -ENXIO;
122 }
123 
124 /**
125  * inode_init_always - perform inode structure initialisation
126  * @sb: superblock inode belongs to
127  * @inode: inode to initialise
128  *
129  * These are initializations that need to be done on every inode
130  * allocation as the fields are not initialised by slab allocation.
131  */
132 int inode_init_always(struct super_block *sb, struct inode *inode)
133 {
134 	static const struct inode_operations empty_iops;
135 	static const struct file_operations no_open_fops = {.open = no_open};
136 	struct address_space *const mapping = &inode->i_data;
137 
138 	inode->i_sb = sb;
139 	inode->i_blkbits = sb->s_blocksize_bits;
140 	inode->i_flags = 0;
141 	atomic64_set(&inode->i_sequence, 0);
142 	atomic_set(&inode->i_count, 1);
143 	inode->i_op = &empty_iops;
144 	inode->i_fop = &no_open_fops;
145 	inode->__i_nlink = 1;
146 	inode->i_opflags = 0;
147 	if (sb->s_xattr)
148 		inode->i_opflags |= IOP_XATTR;
149 	i_uid_write(inode, 0);
150 	i_gid_write(inode, 0);
151 	atomic_set(&inode->i_writecount, 0);
152 	inode->i_size = 0;
153 	inode->i_write_hint = WRITE_LIFE_NOT_SET;
154 	inode->i_blocks = 0;
155 	inode->i_bytes = 0;
156 	inode->i_generation = 0;
157 	inode->i_pipe = NULL;
158 	inode->i_cdev = NULL;
159 	inode->i_link = NULL;
160 	inode->i_dir_seq = 0;
161 	inode->i_rdev = 0;
162 	inode->dirtied_when = 0;
163 
164 #ifdef CONFIG_CGROUP_WRITEBACK
165 	inode->i_wb_frn_winner = 0;
166 	inode->i_wb_frn_avg_time = 0;
167 	inode->i_wb_frn_history = 0;
168 #endif
169 
170 	if (security_inode_alloc(inode))
171 		goto out;
172 	spin_lock_init(&inode->i_lock);
173 	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
174 
175 	init_rwsem(&inode->i_rwsem);
176 	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
177 
178 	atomic_set(&inode->i_dio_count, 0);
179 
180 	mapping->a_ops = &empty_aops;
181 	mapping->host = inode;
182 	mapping->flags = 0;
183 	if (sb->s_type->fs_flags & FS_THP_SUPPORT)
184 		__set_bit(AS_THP_SUPPORT, &mapping->flags);
185 	mapping->wb_err = 0;
186 	atomic_set(&mapping->i_mmap_writable, 0);
187 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
188 	atomic_set(&mapping->nr_thps, 0);
189 #endif
190 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
191 	mapping->private_data = NULL;
192 	mapping->writeback_index = 0;
193 	inode->i_private = NULL;
194 	inode->i_mapping = mapping;
195 	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
196 #ifdef CONFIG_FS_POSIX_ACL
197 	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
198 #endif
199 
200 #ifdef CONFIG_FSNOTIFY
201 	inode->i_fsnotify_mask = 0;
202 #endif
203 	inode->i_flctx = NULL;
204 	this_cpu_inc(nr_inodes);
205 
206 	return 0;
207 out:
208 	return -ENOMEM;
209 }
210 EXPORT_SYMBOL(inode_init_always);
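
/*
 * Illustrative sketch (not part of this file; all "exfs" names are
 * hypothetical): a typical ->alloc_inode implementation embeds struct inode
 * in a filesystem-private structure and returns the embedded part, letting
 * alloc_inode() below run inode_init_always() on it.
 */
struct exfs_inode_info {
	unsigned long		flags;		/* fs-private state */
	struct inode		vfs_inode;	/* initialised by the VFS */
};

static struct kmem_cache *exfs_inode_cachep;

static struct inode *exfs_alloc_inode(struct super_block *sb)
{
	struct exfs_inode_info *ei;

	ei = kmem_cache_alloc(exfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}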
211 
212 void free_inode_nonrcu(struct inode *inode)
213 {
214 	kmem_cache_free(inode_cachep, inode);
215 }
216 EXPORT_SYMBOL(free_inode_nonrcu);
217 
218 static void i_callback(struct rcu_head *head)
219 {
220 	struct inode *inode = container_of(head, struct inode, i_rcu);
221 	if (inode->free_inode)
222 		inode->free_inode(inode);
223 	else
224 		free_inode_nonrcu(inode);
225 }
226 
227 static struct inode *alloc_inode(struct super_block *sb)
228 {
229 	const struct super_operations *ops = sb->s_op;
230 	struct inode *inode;
231 
232 	if (ops->alloc_inode)
233 		inode = ops->alloc_inode(sb);
234 	else
235 		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
236 
237 	if (!inode)
238 		return NULL;
239 
240 	if (unlikely(inode_init_always(sb, inode))) {
241 		if (ops->destroy_inode) {
242 			ops->destroy_inode(inode);
243 			if (!ops->free_inode)
244 				return NULL;
245 		}
246 		inode->free_inode = ops->free_inode;
247 		i_callback(&inode->i_rcu);
248 		return NULL;
249 	}
250 
251 	return inode;
252 }
253 
254 void __destroy_inode(struct inode *inode)
255 {
256 	BUG_ON(inode_has_buffers(inode));
257 	inode_detach_wb(inode);
258 	security_inode_free(inode);
259 	fsnotify_inode_delete(inode);
260 	locks_free_lock_context(inode);
261 	if (!inode->i_nlink) {
262 		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
263 		atomic_long_dec(&inode->i_sb->s_remove_count);
264 	}
265 
266 #ifdef CONFIG_FS_POSIX_ACL
267 	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
268 		posix_acl_release(inode->i_acl);
269 	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
270 		posix_acl_release(inode->i_default_acl);
271 #endif
272 	this_cpu_dec(nr_inodes);
273 }
274 EXPORT_SYMBOL(__destroy_inode);
275 
276 static void destroy_inode(struct inode *inode)
277 {
278 	const struct super_operations *ops = inode->i_sb->s_op;
279 
280 	BUG_ON(!list_empty(&inode->i_lru));
281 	__destroy_inode(inode);
282 	if (ops->destroy_inode) {
283 		ops->destroy_inode(inode);
284 		if (!ops->free_inode)
285 			return;
286 	}
287 	inode->free_inode = ops->free_inode;
288 	call_rcu(&inode->i_rcu, i_callback);
289 }
290 
291 /**
292  * drop_nlink - directly drop an inode's link count
293  * @inode: inode
294  *
295  * This is a low-level filesystem helper to replace any
296  * direct filesystem manipulation of i_nlink.  In cases
297  * where we are attempting to track writes to the
298  * filesystem, a decrement to zero means an imminent
299  * write when the file is truncated and actually unlinked
300  * on the filesystem.
301  */
302 void drop_nlink(struct inode *inode)
303 {
304 	WARN_ON(inode->i_nlink == 0);
305 	inode->__i_nlink--;
306 	if (!inode->i_nlink)
307 		atomic_long_inc(&inode->i_sb->s_remove_count);
308 }
309 EXPORT_SYMBOL(drop_nlink);
310 
311 /**
312  * clear_nlink - directly zero an inode's link count
313  * @inode: inode
314  *
315  * This is a low-level filesystem helper to replace any
316  * direct filesystem manipulation of i_nlink.  See
317  * drop_nlink() for why we care about i_nlink hitting zero.
318  */
319 void clear_nlink(struct inode *inode)
320 {
321 	if (inode->i_nlink) {
322 		inode->__i_nlink = 0;
323 		atomic_long_inc(&inode->i_sb->s_remove_count);
324 	}
325 }
326 EXPORT_SYMBOL(clear_nlink);
327 
328 /**
329  * set_nlink - directly set an inode's link count
330  * @inode: inode
331  * @nlink: new nlink (should be non-zero)
332  *
333  * This is a low-level filesystem helper to replace any
334  * direct filesystem manipulation of i_nlink.
335  */
336 void set_nlink(struct inode *inode, unsigned int nlink)
337 {
338 	if (!nlink) {
339 		clear_nlink(inode);
340 	} else {
341 		/* Yes, some filesystems do change nlink from zero to one */
342 		if (inode->i_nlink == 0)
343 			atomic_long_dec(&inode->i_sb->s_remove_count);
344 
345 		inode->__i_nlink = nlink;
346 	}
347 }
348 EXPORT_SYMBOL(set_nlink);
349 
350 /**
351  * inc_nlink - directly increment an inode's link count
352  * @inode: inode
353  *
354  * This is a low-level filesystem helper to replace any
355  * direct filesystem manipulation of i_nlink.  Currently,
356  * it is only here for parity with dec_nlink().
357  */
358 void inc_nlink(struct inode *inode)
359 {
360 	if (unlikely(inode->i_nlink == 0)) {
361 		WARN_ON(!(inode->i_state & I_LINKABLE));
362 		atomic_long_dec(&inode->i_sb->s_remove_count);
363 	}
364 
365 	inode->__i_nlink++;
366 }
367 EXPORT_SYMBOL(inc_nlink);
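
/*
 * Illustrative sketch (not part of this file, function name hypothetical):
 * how a simple filesystem's ->link() method typically uses the nlink
 * helpers above together with ihold() from further down.
 */
static int example_link(struct dentry *old_dentry, struct inode *dir,
			struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);

	inc_nlink(inode);	/* one more name now refers to this inode */
	ihold(inode);		/* reference held by the new dentry */
	dir->i_mtime = dir->i_ctime = current_time(dir);
	d_instantiate(dentry, inode);
	return 0;
}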
368 
369 static void __address_space_init_once(struct address_space *mapping)
370 {
371 	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
372 	init_rwsem(&mapping->i_mmap_rwsem);
373 	INIT_LIST_HEAD(&mapping->private_list);
374 	spin_lock_init(&mapping->private_lock);
375 	mapping->i_mmap = RB_ROOT_CACHED;
376 }
377 
378 void address_space_init_once(struct address_space *mapping)
379 {
380 	memset(mapping, 0, sizeof(*mapping));
381 	__address_space_init_once(mapping);
382 }
383 EXPORT_SYMBOL(address_space_init_once);
384 
385 /*
386  * These are initializations that only need to be done
387  * once, because the fields are idempotent across use
388  * of the inode, so let the slab be aware of that.
389  */
390 void inode_init_once(struct inode *inode)
391 {
392 	memset(inode, 0, sizeof(*inode));
393 	INIT_HLIST_NODE(&inode->i_hash);
394 	INIT_LIST_HEAD(&inode->i_devices);
395 	INIT_LIST_HEAD(&inode->i_io_list);
396 	INIT_LIST_HEAD(&inode->i_wb_list);
397 	INIT_LIST_HEAD(&inode->i_lru);
398 	__address_space_init_once(&inode->i_data);
399 	i_size_ordered_init(inode);
400 }
401 EXPORT_SYMBOL(inode_init_once);
402 
403 static void init_once(void *foo)
404 {
405 	struct inode *inode = (struct inode *) foo;
406 
407 	inode_init_once(inode);
408 }
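
/*
 * Illustrative sketch (not part of this file, reusing the hypothetical
 * "exfs" types from the sketch above): filesystems that embed struct inode
 * create their slab cache with a constructor in the same way, so the
 * idempotent fields are set up exactly once per slab object.
 */
static void exfs_init_once(void *foo)
{
	struct exfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init exfs_init_inodecache(void)
{
	exfs_inode_cachep = kmem_cache_create("exfs_inode_cache",
				sizeof(struct exfs_inode_info), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
				exfs_init_once);
	return exfs_inode_cachep ? 0 : -ENOMEM;
}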
409 
410 /*
411  * inode->i_lock must be held
412  */
413 void __iget(struct inode *inode)
414 {
415 	atomic_inc(&inode->i_count);
416 }
417 
418 /*
419  * get additional reference to inode; caller must already hold one.
420  */
421 void ihold(struct inode *inode)
422 {
423 	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
424 }
425 EXPORT_SYMBOL(ihold);
426 
427 static void inode_lru_list_add(struct inode *inode)
428 {
429 	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
430 		this_cpu_inc(nr_unused);
431 	else
432 		inode->i_state |= I_REFERENCED;
433 }
434 
435 /*
436  * Add inode to LRU if needed (inode is unused and clean).
437  *
438  * Needs inode->i_lock held.
439  */
440 void inode_add_lru(struct inode *inode)
441 {
442 	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
443 				I_FREEING | I_WILL_FREE)) &&
444 	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
445 		inode_lru_list_add(inode);
446 }
447 
448 
449 static void inode_lru_list_del(struct inode *inode)
450 {
451 
452 	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
453 		this_cpu_dec(nr_unused);
454 }
455 
456 /**
457  * inode_sb_list_add - add inode to the superblock list of inodes
458  * @inode: inode to add
459  */
460 void inode_sb_list_add(struct inode *inode)
461 {
462 	spin_lock(&inode->i_sb->s_inode_list_lock);
463 	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
464 	spin_unlock(&inode->i_sb->s_inode_list_lock);
465 }
466 EXPORT_SYMBOL_GPL(inode_sb_list_add);
467 
468 static inline void inode_sb_list_del(struct inode *inode)
469 {
470 	if (!list_empty(&inode->i_sb_list)) {
471 		spin_lock(&inode->i_sb->s_inode_list_lock);
472 		list_del_init(&inode->i_sb_list);
473 		spin_unlock(&inode->i_sb->s_inode_list_lock);
474 	}
475 }
476 
477 static unsigned long hash(struct super_block *sb, unsigned long hashval)
478 {
479 	unsigned long tmp;
480 
481 	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
482 			L1_CACHE_BYTES;
483 	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
484 	return tmp & i_hash_mask;
485 }
486 
487 /**
488  *	__insert_inode_hash - hash an inode
489  *	@inode: unhashed inode
490  *	@hashval: unsigned long value used to locate this object in the
491  *		inode_hashtable.
492  *
493  *	Add an inode to the inode hash for this superblock.
494  */
495 void __insert_inode_hash(struct inode *inode, unsigned long hashval)
496 {
497 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
498 
499 	spin_lock(&inode_hash_lock);
500 	spin_lock(&inode->i_lock);
501 	hlist_add_head_rcu(&inode->i_hash, b);
502 	spin_unlock(&inode->i_lock);
503 	spin_unlock(&inode_hash_lock);
504 }
505 EXPORT_SYMBOL(__insert_inode_hash);
506 
507 /**
508  *	__remove_inode_hash - remove an inode from the hash
509  *	@inode: inode to unhash
510  *
511  *	Remove an inode from the superblock.
512  */
513 void __remove_inode_hash(struct inode *inode)
514 {
515 	spin_lock(&inode_hash_lock);
516 	spin_lock(&inode->i_lock);
517 	hlist_del_init_rcu(&inode->i_hash);
518 	spin_unlock(&inode->i_lock);
519 	spin_unlock(&inode_hash_lock);
520 }
521 EXPORT_SYMBOL(__remove_inode_hash);
522 
523 void clear_inode(struct inode *inode)
524 {
525 	/*
526 	 * We have to cycle the i_pages lock here because reclaim can be in the
527 	 * process of removing the last page (in __delete_from_page_cache())
528 	 * and we must not free the mapping under it.
529 	 */
530 	xa_lock_irq(&inode->i_data.i_pages);
531 	BUG_ON(inode->i_data.nrpages);
532 	BUG_ON(inode->i_data.nrexceptional);
533 	xa_unlock_irq(&inode->i_data.i_pages);
534 	BUG_ON(!list_empty(&inode->i_data.private_list));
535 	BUG_ON(!(inode->i_state & I_FREEING));
536 	BUG_ON(inode->i_state & I_CLEAR);
537 	BUG_ON(!list_empty(&inode->i_wb_list));
538 	/* don't need i_lock here, no concurrent mods to i_state */
539 	inode->i_state = I_FREEING | I_CLEAR;
540 }
541 EXPORT_SYMBOL(clear_inode);
542 
543 /*
544  * Free the inode passed in, removing it from the lists it is still connected
545  * to. We remove any pages still attached to the inode and wait for any IO that
546  * is still in progress before finally destroying the inode.
547  *
548  * An inode must already be marked I_FREEING so that we avoid the inode being
549  * moved back onto lists if we race with other code that manipulates the lists
550  * (e.g. writeback_single_inode). The caller is responsible for setting this.
551  *
552  * An inode must already be removed from the LRU list before being evicted from
553  * the cache. This should occur atomically with setting the I_FREEING state
554  * flag, so no inodes here should ever be on the LRU when being evicted.
555  */
556 static void evict(struct inode *inode)
557 {
558 	const struct super_operations *op = inode->i_sb->s_op;
559 
560 	BUG_ON(!(inode->i_state & I_FREEING));
561 	BUG_ON(!list_empty(&inode->i_lru));
562 
563 	if (!list_empty(&inode->i_io_list))
564 		inode_io_list_del(inode);
565 
566 	inode_sb_list_del(inode);
567 
568 	/*
569 	 * Wait for flusher thread to be done with the inode so that filesystem
570 	 * does not start destroying it while writeback is still running. Since
571 	 * the inode has I_FREEING set, flusher thread won't start new work on
572 	 * the inode.  We just have to wait for running writeback to finish.
573 	 */
574 	inode_wait_for_writeback(inode);
575 
576 	if (op->evict_inode) {
577 		op->evict_inode(inode);
578 	} else {
579 		truncate_inode_pages_final(&inode->i_data);
580 		clear_inode(inode);
581 	}
582 	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
583 		cd_forget(inode);
584 
585 	remove_inode_hash(inode);
586 
587 	spin_lock(&inode->i_lock);
588 	wake_up_bit(&inode->i_state, __I_NEW);
589 	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
590 	spin_unlock(&inode->i_lock);
591 
592 	destroy_inode(inode);
593 }
594 
595 /*
596  * dispose_list - dispose of the contents of a local list
597  * @head: the head of the list to free
598  *
599  * Dispose-list gets a local list with local inodes in it, so it doesn't
600  * need to worry about list corruption and SMP locks.
601  */
602 static void dispose_list(struct list_head *head)
603 {
604 	while (!list_empty(head)) {
605 		struct inode *inode;
606 
607 		inode = list_first_entry(head, struct inode, i_lru);
608 		list_del_init(&inode->i_lru);
609 
610 		evict(inode);
611 		cond_resched();
612 	}
613 }
614 
615 /**
616  * evict_inodes	- evict all evictable inodes for a superblock
617  * @sb:		superblock to operate on
618  *
619  * Make sure that no inodes with zero refcount are retained.  This is
620  * called by superblock shutdown after having SB_ACTIVE flag removed,
621  * so any inode reaching zero refcount during or after that call will
622  * be immediately evicted.
623  */
624 void evict_inodes(struct super_block *sb)
625 {
626 	struct inode *inode, *next;
627 	LIST_HEAD(dispose);
628 
629 again:
630 	spin_lock(&sb->s_inode_list_lock);
631 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
632 		if (atomic_read(&inode->i_count))
633 			continue;
634 
635 		spin_lock(&inode->i_lock);
636 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
637 			spin_unlock(&inode->i_lock);
638 			continue;
639 		}
640 
641 		inode->i_state |= I_FREEING;
642 		inode_lru_list_del(inode);
643 		spin_unlock(&inode->i_lock);
644 		list_add(&inode->i_lru, &dispose);
645 
646 		/*
647 		 * We can have a ton of inodes to evict at unmount time given
648 		 * enough memory, check to see if we need to go to sleep for a
649 		 * bit so we don't livelock.
650 		 */
651 		if (need_resched()) {
652 			spin_unlock(&sb->s_inode_list_lock);
653 			cond_resched();
654 			dispose_list(&dispose);
655 			goto again;
656 		}
657 	}
658 	spin_unlock(&sb->s_inode_list_lock);
659 
660 	dispose_list(&dispose);
661 }
662 EXPORT_SYMBOL_GPL(evict_inodes);
663 
664 /**
665  * invalidate_inodes	- attempt to free all inodes on a superblock
666  * @sb:		superblock to operate on
667  * @kill_dirty: flag to guide handling of dirty inodes
668  *
669  * Attempts to free all inodes for a given superblock.  If there were any
670  * busy inodes return a non-zero value, else zero.
671  * If @kill_dirty is set, discard dirty inodes too, otherwise treat
672  * them as busy.
673  */
674 int invalidate_inodes(struct super_block *sb, bool kill_dirty)
675 {
676 	int busy = 0;
677 	struct inode *inode, *next;
678 	LIST_HEAD(dispose);
679 
680 again:
681 	spin_lock(&sb->s_inode_list_lock);
682 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
683 		spin_lock(&inode->i_lock);
684 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
685 			spin_unlock(&inode->i_lock);
686 			continue;
687 		}
688 		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
689 			spin_unlock(&inode->i_lock);
690 			busy = 1;
691 			continue;
692 		}
693 		if (atomic_read(&inode->i_count)) {
694 			spin_unlock(&inode->i_lock);
695 			busy = 1;
696 			continue;
697 		}
698 
699 		inode->i_state |= I_FREEING;
700 		inode_lru_list_del(inode);
701 		spin_unlock(&inode->i_lock);
702 		list_add(&inode->i_lru, &dispose);
703 		if (need_resched()) {
704 			spin_unlock(&sb->s_inode_list_lock);
705 			cond_resched();
706 			dispose_list(&dispose);
707 			goto again;
708 		}
709 	}
710 	spin_unlock(&sb->s_inode_list_lock);
711 
712 	dispose_list(&dispose);
713 
714 	return busy;
715 }
716 
717 /*
718  * Isolate the inode from the LRU in preparation for freeing it.
719  *
720  * Any inodes which are pinned purely because of attached pagecache have their
721  * pagecache removed.  If the inode has metadata buffers attached to
722  * mapping->private_list then try to remove them.
723  *
724  * If the inode has the I_REFERENCED flag set, then it means that it has been
725  * used recently - the flag is set in iput_final(). When we encounter such an
726  * inode, clear the flag and move it to the back of the LRU so it gets another
727  * pass through the LRU before it gets reclaimed. This is necessary because of
728  * the fact we are doing lazy LRU updates to minimise lock contention so the
729  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
730  * with this flag set because they are the inodes that are out of order.
731  */
732 static enum lru_status inode_lru_isolate(struct list_head *item,
733 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
734 {
735 	struct list_head *freeable = arg;
736 	struct inode	*inode = container_of(item, struct inode, i_lru);
737 
738 	/*
739 	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
740 	 * If we fail to get the lock, just skip it.
741 	 */
742 	if (!spin_trylock(&inode->i_lock))
743 		return LRU_SKIP;
744 
745 	/*
746 	 * Referenced or dirty inodes are still in use. Give them another pass
747  * through the LRU as we cannot reclaim them now.
748 	 */
749 	if (atomic_read(&inode->i_count) ||
750 	    (inode->i_state & ~I_REFERENCED)) {
751 		list_lru_isolate(lru, &inode->i_lru);
752 		spin_unlock(&inode->i_lock);
753 		this_cpu_dec(nr_unused);
754 		return LRU_REMOVED;
755 	}
756 
757 	/* recently referenced inodes get one more pass */
758 	if (inode->i_state & I_REFERENCED) {
759 		inode->i_state &= ~I_REFERENCED;
760 		spin_unlock(&inode->i_lock);
761 		return LRU_ROTATE;
762 	}
763 
764 	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
765 		__iget(inode);
766 		spin_unlock(&inode->i_lock);
767 		spin_unlock(lru_lock);
768 		if (remove_inode_buffers(inode)) {
769 			unsigned long reap;
770 			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
771 			if (current_is_kswapd())
772 				__count_vm_events(KSWAPD_INODESTEAL, reap);
773 			else
774 				__count_vm_events(PGINODESTEAL, reap);
775 			if (current->reclaim_state)
776 				current->reclaim_state->reclaimed_slab += reap;
777 		}
778 		iput(inode);
779 		spin_lock(lru_lock);
780 		return LRU_RETRY;
781 	}
782 
783 	WARN_ON(inode->i_state & I_NEW);
784 	inode->i_state |= I_FREEING;
785 	list_lru_isolate_move(lru, &inode->i_lru, freeable);
786 	spin_unlock(&inode->i_lock);
787 
788 	this_cpu_dec(nr_unused);
789 	return LRU_REMOVED;
790 }
791 
792 /*
793  * Walk the superblock inode LRU for freeable inodes and attempt to free them.
794  * This is called from the superblock shrinker function with a number of inodes
795  * to trim from the LRU. Inodes to be freed are moved to a temporary list and
796  * then are freed outside inode_lock by dispose_list().
797  */
798 long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
799 {
800 	LIST_HEAD(freeable);
801 	long freed;
802 
803 	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
804 				     inode_lru_isolate, &freeable);
805 	dispose_list(&freeable);
806 	return freed;
807 }
808 
809 static void __wait_on_freeing_inode(struct inode *inode);
810 /*
811  * Called with the inode lock held.
812  */
813 static struct inode *find_inode(struct super_block *sb,
814 				struct hlist_head *head,
815 				int (*test)(struct inode *, void *),
816 				void *data)
817 {
818 	struct inode *inode = NULL;
819 
820 repeat:
821 	hlist_for_each_entry(inode, head, i_hash) {
822 		if (inode->i_sb != sb)
823 			continue;
824 		if (!test(inode, data))
825 			continue;
826 		spin_lock(&inode->i_lock);
827 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
828 			__wait_on_freeing_inode(inode);
829 			goto repeat;
830 		}
831 		if (unlikely(inode->i_state & I_CREATING)) {
832 			spin_unlock(&inode->i_lock);
833 			return ERR_PTR(-ESTALE);
834 		}
835 		__iget(inode);
836 		spin_unlock(&inode->i_lock);
837 		return inode;
838 	}
839 	return NULL;
840 }
841 
842 /*
843  * find_inode_fast is the fast path version of find_inode, see the comment at
844  * iget_locked for details.
845  */
846 static struct inode *find_inode_fast(struct super_block *sb,
847 				struct hlist_head *head, unsigned long ino)
848 {
849 	struct inode *inode = NULL;
850 
851 repeat:
852 	hlist_for_each_entry(inode, head, i_hash) {
853 		if (inode->i_ino != ino)
854 			continue;
855 		if (inode->i_sb != sb)
856 			continue;
857 		spin_lock(&inode->i_lock);
858 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
859 			__wait_on_freeing_inode(inode);
860 			goto repeat;
861 		}
862 		if (unlikely(inode->i_state & I_CREATING)) {
863 			spin_unlock(&inode->i_lock);
864 			return ERR_PTR(-ESTALE);
865 		}
866 		__iget(inode);
867 		spin_unlock(&inode->i_lock);
868 		return inode;
869 	}
870 	return NULL;
871 }
872 
873 /*
874  * Each cpu owns a range of LAST_INO_BATCH numbers.
875  * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
876  * to renew the exhausted range.
877  *
878  * This does not significantly increase overflow rate because every CPU can
879  * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
880  * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
881  * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
882  * overflow rate by 2x, which does not seem too significant.
883  *
884  * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
885  * error if st_ino won't fit in target struct field. Use 32bit counter
886  * here to attempt to avoid that.
887  */
888 #define LAST_INO_BATCH 1024
889 static DEFINE_PER_CPU(unsigned int, last_ino);
890 
891 unsigned int get_next_ino(void)
892 {
893 	unsigned int *p = &get_cpu_var(last_ino);
894 	unsigned int res = *p;
895 
896 #ifdef CONFIG_SMP
897 	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
898 		static atomic_t shared_last_ino;
899 		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
900 
901 		res = next - LAST_INO_BATCH;
902 	}
903 #endif
904 
905 	res++;
906 	/* get_next_ino should not provide a 0 inode number */
907 	if (unlikely(!res))
908 		res++;
909 	*p = res;
910 	put_cpu_var(last_ino);
911 	return res;
912 }
913 EXPORT_SYMBOL(get_next_ino);
914 
915 /**
916  *	new_inode_pseudo 	- obtain an inode
917  *	@sb: superblock
918  *
919  *	Allocates a new inode for the given superblock.
920  *	The inode won't be chained into the superblock's s_inodes list.
921  *	This means:
922  *	- the fs can't be unmounted
923  *	- quotas, fsnotify and writeback can't work
924  */
925 struct inode *new_inode_pseudo(struct super_block *sb)
926 {
927 	struct inode *inode = alloc_inode(sb);
928 
929 	if (inode) {
930 		spin_lock(&inode->i_lock);
931 		inode->i_state = 0;
932 		spin_unlock(&inode->i_lock);
933 		INIT_LIST_HEAD(&inode->i_sb_list);
934 	}
935 	return inode;
936 }
937 
938 /**
939  *	new_inode 	- obtain an inode
940  *	@sb: superblock
941  *
942  *	Allocates a new inode for the given superblock. The default gfp_mask
943  *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
944  *	If HIGHMEM pages are unsuitable or it is known that pages allocated
945  *	for the page cache are not reclaimable or migratable,
946  *	mapping_set_gfp_mask() must be called with suitable flags on the
947  *	newly created inode's mapping.
948  *
949  */
950 struct inode *new_inode(struct super_block *sb)
951 {
952 	struct inode *inode;
953 
954 	spin_lock_prefetch(&sb->s_inode_list_lock);
955 
956 	inode = new_inode_pseudo(sb);
957 	if (inode)
958 		inode_sb_list_add(inode);
959 	return inode;
960 }
961 EXPORT_SYMBOL(new_inode);
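
/*
 * Illustrative sketch (not part of this file, function name hypothetical):
 * creating a brand-new inode in a simple in-memory filesystem.  new_inode()
 * links it into the sb list; get_next_ino() above supplies a number for
 * filesystems without stable on-disk inode numbering.
 */
static struct inode *example_make_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	return inode;
}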
962 
963 #ifdef CONFIG_DEBUG_LOCK_ALLOC
964 void lockdep_annotate_inode_mutex_key(struct inode *inode)
965 {
966 	if (S_ISDIR(inode->i_mode)) {
967 		struct file_system_type *type = inode->i_sb->s_type;
968 
969 		/* Set new key only if filesystem hasn't already changed it */
970 		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
971 			/*
972 			 * ensure nobody is actually holding i_mutex
973 			 */
974 			// mutex_destroy(&inode->i_mutex);
975 			init_rwsem(&inode->i_rwsem);
976 			lockdep_set_class(&inode->i_rwsem,
977 					  &type->i_mutex_dir_key);
978 		}
979 	}
980 }
981 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
982 #endif
983 
984 /**
985  * unlock_new_inode - clear the I_NEW state and wake up any waiters
986  * @inode:	new inode to unlock
987  *
988  * Called when the inode is fully initialised to clear the new state of the
989  * inode and wake up anyone waiting for the inode to finish initialisation.
990  */
991 void unlock_new_inode(struct inode *inode)
992 {
993 	lockdep_annotate_inode_mutex_key(inode);
994 	spin_lock(&inode->i_lock);
995 	WARN_ON(!(inode->i_state & I_NEW));
996 	inode->i_state &= ~I_NEW & ~I_CREATING;
997 	smp_mb();
998 	wake_up_bit(&inode->i_state, __I_NEW);
999 	spin_unlock(&inode->i_lock);
1000 }
1001 EXPORT_SYMBOL(unlock_new_inode);
1002 
1003 void discard_new_inode(struct inode *inode)
1004 {
1005 	lockdep_annotate_inode_mutex_key(inode);
1006 	spin_lock(&inode->i_lock);
1007 	WARN_ON(!(inode->i_state & I_NEW));
1008 	inode->i_state &= ~I_NEW;
1009 	smp_mb();
1010 	wake_up_bit(&inode->i_state, __I_NEW);
1011 	spin_unlock(&inode->i_lock);
1012 	iput(inode);
1013 }
1014 EXPORT_SYMBOL(discard_new_inode);
1015 
1016 /**
1017  * lock_two_nondirectories - take two i_mutexes on non-directory objects
1018  *
1019  * Lock any non-NULL argument that is not a directory.
1020  * Zero, one or two objects may be locked by this function.
1021  *
1022  * @inode1: first inode to lock
1023  * @inode2: second inode to lock
1024  */
1025 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1026 {
1027 	if (inode1 > inode2)
1028 		swap(inode1, inode2);
1029 
1030 	if (inode1 && !S_ISDIR(inode1->i_mode))
1031 		inode_lock(inode1);
1032 	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1033 		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
1034 }
1035 EXPORT_SYMBOL(lock_two_nondirectories);
1036 
1037 /**
1038  * unlock_two_nondirectories - release locks from lock_two_nondirectories()
1039  * @inode1: first inode to unlock
1040  * @inode2: second inode to unlock
1041  */
1042 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1043 {
1044 	if (inode1 && !S_ISDIR(inode1->i_mode))
1045 		inode_unlock(inode1);
1046 	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
1047 		inode_unlock(inode2);
1048 }
1049 EXPORT_SYMBOL(unlock_two_nondirectories);
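
/*
 * Illustrative sketch (not part of this file, function name hypothetical):
 * the pair above is used around operations that modify two regular files
 * at once, such as clone- or deduplication-style operations.
 */
static int example_two_file_op(struct inode *src, struct inode *dst)
{
	lock_two_nondirectories(src, dst);
	/* ... operate on both inodes while both i_rwsems are held ... */
	unlock_two_nondirectories(src, dst);
	return 0;
}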
1050 
1051 /**
1052  * inode_insert5 - obtain an inode from a mounted file system
1053  * @inode:	pre-allocated inode to use for insert to cache
1054  * @hashval:	hash value (usually inode number) to get
1055  * @test:	callback used for comparisons between inodes
1056  * @set:	callback used to initialize a new struct inode
1057  * @data:	opaque data pointer to pass to @test and @set
1058  *
1059  * Search for the inode specified by @hashval and @data in the inode cache,
1060  * and if present return it with an increased reference count. This is
1061  * a variant of iget5_locked() for callers that don't want to fail on
1062  * memory allocation of the inode.
1063  *
1064  * If the inode is not in cache, insert the pre-allocated inode to cache and
1065  * return it locked, hashed, and with the I_NEW flag set. The file system gets
1066  * to fill it in before unlocking it via unlock_new_inode().
1067  *
1068  * Note both @test and @set are called with the inode_hash_lock held, so can't
1069  * sleep.
1070  */
1071 struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1072 			    int (*test)(struct inode *, void *),
1073 			    int (*set)(struct inode *, void *), void *data)
1074 {
1075 	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1076 	struct inode *old;
1077 	bool creating = inode->i_state & I_CREATING;
1078 
1079 again:
1080 	spin_lock(&inode_hash_lock);
1081 	old = find_inode(inode->i_sb, head, test, data);
1082 	if (unlikely(old)) {
1083 		/*
1084 		 * Uhhuh, somebody else created the same inode under us.
1085 		 * Use the old inode instead of the preallocated one.
1086 		 */
1087 		spin_unlock(&inode_hash_lock);
1088 		if (IS_ERR(old))
1089 			return NULL;
1090 		wait_on_inode(old);
1091 		if (unlikely(inode_unhashed(old))) {
1092 			iput(old);
1093 			goto again;
1094 		}
1095 		return old;
1096 	}
1097 
1098 	if (set && unlikely(set(inode, data))) {
1099 		inode = NULL;
1100 		goto unlock;
1101 	}
1102 
1103 	/*
1104 	 * Return the locked inode with I_NEW set; the
1105 	 * caller is responsible for filling in the contents.
1106 	 */
1107 	spin_lock(&inode->i_lock);
1108 	inode->i_state |= I_NEW;
1109 	hlist_add_head_rcu(&inode->i_hash, head);
1110 	spin_unlock(&inode->i_lock);
1111 	if (!creating)
1112 		inode_sb_list_add(inode);
1113 unlock:
1114 	spin_unlock(&inode_hash_lock);
1115 
1116 	return inode;
1117 }
1118 EXPORT_SYMBOL(inode_insert5);
1119 
1120 /**
1121  * iget5_locked - obtain an inode from a mounted file system
1122  * @sb:		super block of file system
1123  * @hashval:	hash value (usually inode number) to get
1124  * @test:	callback used for comparisons between inodes
1125  * @set:	callback used to initialize a new struct inode
1126  * @data:	opaque data pointer to pass to @test and @set
1127  *
1128  * Search for the inode specified by @hashval and @data in the inode cache,
1129  * and if present return it with an increased reference count. This is
1130  * a generalized version of iget_locked() for file systems where the inode
1131  * number is not sufficient for unique identification of an inode.
1132  *
1133  * If the inode is not in cache, allocate a new inode and return it locked,
1134  * hashed, and with the I_NEW flag set. The file system gets to fill it in
1135  * before unlocking it via unlock_new_inode().
1136  *
1137  * Note both @test and @set are called with the inode_hash_lock held, so can't
1138  * sleep.
1139  */
1140 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1141 		int (*test)(struct inode *, void *),
1142 		int (*set)(struct inode *, void *), void *data)
1143 {
1144 	struct inode *inode = ilookup5(sb, hashval, test, data);
1145 
1146 	if (!inode) {
1147 		struct inode *new = alloc_inode(sb);
1148 
1149 		if (new) {
1150 			new->i_state = 0;
1151 			inode = inode_insert5(new, hashval, test, set, data);
1152 			if (unlikely(inode != new))
1153 				destroy_inode(new);
1154 		}
1155 	}
1156 	return inode;
1157 }
1158 EXPORT_SYMBOL(iget5_locked);
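
/*
 * Illustrative sketch (not part of this file, all names hypothetical):
 * @test/@set callbacks as a filesystem might pass them to iget5_locked()
 * when the inode number alone is ambiguous.  Neither callback may sleep,
 * since inode_hash_lock is held while they run.
 */
struct example_key {
	unsigned long objectid;
};

static int example_test(struct inode *inode, void *data)
{
	struct example_key *key = data;

	/* plus whatever fs-private comparison is needed */
	return inode->i_ino == key->objectid;
}

static int example_set(struct inode *inode, void *data)
{
	struct example_key *key = data;

	inode->i_ino = key->objectid;
	return 0;
}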
1159 
1160 /**
1161  * iget_locked - obtain an inode from a mounted file system
1162  * @sb:		super block of file system
1163  * @ino:	inode number to get
1164  *
1165  * Search for the inode specified by @ino in the inode cache and if present
1166  * return it with an increased reference count. This is for file systems
1167  * where the inode number is sufficient for unique identification of an inode.
1168  *
1169  * If the inode is not in cache, allocate a new inode and return it locked,
1170  * hashed, and with the I_NEW flag set.  The file system gets to fill it in
1171  * before unlocking it via unlock_new_inode().
1172  */
1173 struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1174 {
1175 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1176 	struct inode *inode;
1177 again:
1178 	spin_lock(&inode_hash_lock);
1179 	inode = find_inode_fast(sb, head, ino);
1180 	spin_unlock(&inode_hash_lock);
1181 	if (inode) {
1182 		if (IS_ERR(inode))
1183 			return NULL;
1184 		wait_on_inode(inode);
1185 		if (unlikely(inode_unhashed(inode))) {
1186 			iput(inode);
1187 			goto again;
1188 		}
1189 		return inode;
1190 	}
1191 
1192 	inode = alloc_inode(sb);
1193 	if (inode) {
1194 		struct inode *old;
1195 
1196 		spin_lock(&inode_hash_lock);
1197 		/* We released the lock, so.. */
1198 		old = find_inode_fast(sb, head, ino);
1199 		if (!old) {
1200 			inode->i_ino = ino;
1201 			spin_lock(&inode->i_lock);
1202 			inode->i_state = I_NEW;
1203 			hlist_add_head_rcu(&inode->i_hash, head);
1204 			spin_unlock(&inode->i_lock);
1205 			inode_sb_list_add(inode);
1206 			spin_unlock(&inode_hash_lock);
1207 
1208 			/* Return the locked inode with I_NEW set; the
1209 			 * caller is responsible for filling in the contents.
1210 			 */
1211 			return inode;
1212 		}
1213 
1214 		/*
1215 		 * Uhhuh, somebody else created the same inode under
1216 		 * us. Use the old inode instead of the one we just
1217 		 * allocated.
1218 		 */
1219 		spin_unlock(&inode_hash_lock);
1220 		destroy_inode(inode);
1221 		if (IS_ERR(old))
1222 			return NULL;
1223 		inode = old;
1224 		wait_on_inode(inode);
1225 		if (unlikely(inode_unhashed(inode))) {
1226 			iput(inode);
1227 			goto again;
1228 		}
1229 	}
1230 	return inode;
1231 }
1232 EXPORT_SYMBOL(iget_locked);
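
/*
 * Illustrative sketch (not part of this file; the "exfs" name is
 * hypothetical): the canonical caller-side pattern for iget_locked().
 * If I_NEW is set the inode is ours to fill in; otherwise it was found
 * fully initialised in the cache.
 */
static struct inode *exfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already in cache, set up */

	/* ... read the on-disk inode and initialise fields here ... */
	inode->i_mode = S_IFREG | 0600;

	unlock_new_inode(inode);
	return inode;
}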
1233 
1234 /*
1235  * search the inode cache for a matching inode number.
1236  * If we find one, then the inode number we are trying to
1237  * allocate is not unique and so we should not use it.
1238  *
1239  * Returns 1 if the inode number is unique, 0 if it is not.
1240  */
1241 static int test_inode_iunique(struct super_block *sb, unsigned long ino)
1242 {
1243 	struct hlist_head *b = inode_hashtable + hash(sb, ino);
1244 	struct inode *inode;
1245 
1246 	hlist_for_each_entry_rcu(inode, b, i_hash) {
1247 		if (inode->i_ino == ino && inode->i_sb == sb)
1248 			return 0;
1249 	}
1250 	return 1;
1251 }
1252 
1253 /**
1254  *	iunique - get a unique inode number
1255  *	@sb: superblock
1256  *	@max_reserved: highest reserved inode number
1257  *
1258  *	Obtain an inode number that is unique on the system for a given
1259  *	superblock. This is used by file systems that have no natural
1260  *	permanent inode numbering system. An inode number is returned that
1261  *	is higher than the reserved limit but unique.
1262  *
1263  *	BUGS:
1264  *	With a large number of inodes live on the file system this function
1265  *	currently becomes quite slow.
1266  */
1267 ino_t iunique(struct super_block *sb, ino_t max_reserved)
1268 {
1269 	/*
1270 	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
1271 	 * error if st_ino won't fit in target struct field. Use 32bit counter
1272 	 * here to attempt to avoid that.
1273 	 */
1274 	static DEFINE_SPINLOCK(iunique_lock);
1275 	static unsigned int counter;
1276 	ino_t res;
1277 
1278 	rcu_read_lock();
1279 	spin_lock(&iunique_lock);
1280 	do {
1281 		if (counter <= max_reserved)
1282 			counter = max_reserved + 1;
1283 		res = counter++;
1284 	} while (!test_inode_iunique(sb, res));
1285 	spin_unlock(&iunique_lock);
1286 	rcu_read_unlock();
1287 
1288 	return res;
1289 }
1290 EXPORT_SYMBOL(iunique);
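
/*
 * Illustrative sketch (not part of this file; function name and reserved
 * bound are hypothetical): a filesystem with no natural inode numbering
 * picks a unique number above its reserved range at inode creation time.
 */
static void example_assign_ino(struct inode *inode)
{
	inode->i_ino = iunique(inode->i_sb, 1);
}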
1291 
1292 struct inode *igrab(struct inode *inode)
1293 {
1294 	spin_lock(&inode->i_lock);
1295 	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1296 		__iget(inode);
1297 		spin_unlock(&inode->i_lock);
1298 	} else {
1299 		spin_unlock(&inode->i_lock);
1300 		/*
1301 		 * Handle the case where s_op->clear_inode has not been
1302 		 * called yet, and somebody is calling igrab
1303 		 * while the inode is being freed.
1304 		 */
1305 		inode = NULL;
1306 	}
1307 	return inode;
1308 }
1309 EXPORT_SYMBOL(igrab);
1310 
1311 /**
1312  * ilookup5_nowait - search for an inode in the inode cache
1313  * @sb:		super block of file system to search
1314  * @hashval:	hash value (usually inode number) to search for
1315  * @test:	callback used for comparisons between inodes
1316  * @data:	opaque data pointer to pass to @test
1317  *
1318  * Search for the inode specified by @hashval and @data in the inode cache.
1319  * If the inode is in the cache, the inode is returned with an incremented
1320  * reference count.
1321  *
1322  * Note: I_NEW is not waited upon so you have to be very careful what you do
1323  * with the returned inode.  You probably should be using ilookup5() instead.
1324  *
1325  * Note2: @test is called with the inode_hash_lock held, so can't sleep.
1326  */
1327 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1328 		int (*test)(struct inode *, void *), void *data)
1329 {
1330 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1331 	struct inode *inode;
1332 
1333 	spin_lock(&inode_hash_lock);
1334 	inode = find_inode(sb, head, test, data);
1335 	spin_unlock(&inode_hash_lock);
1336 
1337 	return IS_ERR(inode) ? NULL : inode;
1338 }
1339 EXPORT_SYMBOL(ilookup5_nowait);
1340 
1341 /**
1342  * ilookup5 - search for an inode in the inode cache
1343  * @sb:		super block of file system to search
1344  * @hashval:	hash value (usually inode number) to search for
1345  * @test:	callback used for comparisons between inodes
1346  * @data:	opaque data pointer to pass to @test
1347  *
1348  * Search for the inode specified by @hashval and @data in the inode cache,
1349  * and if the inode is in the cache, return the inode with an incremented
1350  * reference count.  Waits on I_NEW before returning the inode.
1352  *
1353  * This is a generalized version of ilookup() for file systems where the
1354  * inode number is not sufficient for unique identification of an inode.
1355  *
1356  * Note: @test is called with the inode_hash_lock held, so can't sleep.
1357  */
1358 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1359 		int (*test)(struct inode *, void *), void *data)
1360 {
1361 	struct inode *inode;
1362 again:
1363 	inode = ilookup5_nowait(sb, hashval, test, data);
1364 	if (inode) {
1365 		wait_on_inode(inode);
1366 		if (unlikely(inode_unhashed(inode))) {
1367 			iput(inode);
1368 			goto again;
1369 		}
1370 	}
1371 	return inode;
1372 }
1373 EXPORT_SYMBOL(ilookup5);
1374 
1375 /**
1376  * ilookup - search for an inode in the inode cache
1377  * @sb:		super block of file system to search
1378  * @ino:	inode number to search for
1379  *
1380  * Search for the inode @ino in the inode cache, and if the inode is in the
1381  * cache, the inode is returned with an incremented reference count.
1382  */
1383 struct inode *ilookup(struct super_block *sb, unsigned long ino)
1384 {
1385 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1386 	struct inode *inode;
1387 again:
1388 	spin_lock(&inode_hash_lock);
1389 	inode = find_inode_fast(sb, head, ino);
1390 	spin_unlock(&inode_hash_lock);
1391 
1392 	if (inode) {
1393 		if (IS_ERR(inode))
1394 			return NULL;
1395 		wait_on_inode(inode);
1396 		if (unlikely(inode_unhashed(inode))) {
1397 			iput(inode);
1398 			goto again;
1399 		}
1400 	}
1401 	return inode;
1402 }
1403 EXPORT_SYMBOL(ilookup);
1404 
1405 /**
1406  * find_inode_nowait - find an inode in the inode cache
1407  * @sb:		super block of file system to search
1408  * @hashval:	hash value (usually inode number) to search for
1409  * @match:	callback used for comparisons between inodes
1410  * @data:	opaque data pointer to pass to @match
1411  *
1412  * Search for the inode specified by @hashval and @data in the inode
1413  * cache, where the helper function @match will return 0 if the inode
1414  * does not match, 1 if the inode does match, and -1 if the search
1415  * should be stopped.  The @match function must be responsible for
1416  * taking the i_lock spin_lock and checking i_state for an inode being
1417  * freed or being initialized, and incrementing the reference count
1418  * before returning 1.  It also must not sleep, since it is called with
1419  * the inode_hash_lock spinlock held.
1420  *
1421  * This is an even more generalized version of ilookup5() for when the
1422  * function must never block --- find_inode() can block in
1423  * __wait_on_freeing_inode() --- or when the caller cannot increment
1424  * the reference count because the resulting iput() might cause an
1425  * inode eviction.  The tradeoff is that the @match function must be
1426  * very carefully implemented.
1427  */
1428 struct inode *find_inode_nowait(struct super_block *sb,
1429 				unsigned long hashval,
1430 				int (*match)(struct inode *, unsigned long,
1431 					     void *),
1432 				void *data)
1433 {
1434 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1435 	struct inode *inode, *ret_inode = NULL;
1436 	int mval;
1437 
1438 	spin_lock(&inode_hash_lock);
1439 	hlist_for_each_entry(inode, head, i_hash) {
1440 		if (inode->i_sb != sb)
1441 			continue;
1442 		mval = match(inode, hashval, data);
1443 		if (mval == 0)
1444 			continue;
1445 		if (mval == 1)
1446 			ret_inode = inode;
1447 		goto out;
1448 	}
1449 out:
1450 	spin_unlock(&inode_hash_lock);
1451 	return ret_inode;
1452 }
1453 EXPORT_SYMBOL(find_inode_nowait);
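
/*
 * Illustrative sketch (not part of this file, all names hypothetical):
 * a @match callback honouring the contract above -- it takes i_lock
 * itself, skips inodes that are being freed or initialised, and takes
 * the reference before returning 1.
 */
static int example_match(struct inode *inode, unsigned long hashval,
			 void *data)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (inode->i_ino == hashval &&
	    !(inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))) {
		__iget(inode);		/* safe: i_lock is held */
		ret = 1;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}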
1454 
1455 /**
1456  * find_inode_rcu - find an inode in the inode cache
1457  * @sb:		Super block of file system to search
1458  * @hashval:	Key to hash
1459  * @test:	Function to test match on an inode
1460  * @data:	Data for test function
1461  *
1462  * Search for the inode specified by @hashval and @data in the inode cache,
1463  * where the helper function @test will return 0 if the inode does not match
1464  * and 1 if it does.  The @test function must be responsible for taking the
1465  * i_lock spin_lock and checking i_state for an inode being freed or being
1466  * initialized.
1467  *
1468  * If successful, this will return the inode for which the @test function
1469  * returned 1 and NULL otherwise.
1470  *
1471  * The @test function is not permitted to take a ref on any inode presented.
1472  * It is also not permitted to sleep.
1473  *
1474  * The caller must hold the RCU read lock.
1475  */
1476 struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
1477 			     int (*test)(struct inode *, void *), void *data)
1478 {
1479 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
1480 	struct inode *inode;
1481 
1482 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1483 			 "suspicious find_inode_rcu() usage");
1484 
1485 	hlist_for_each_entry_rcu(inode, head, i_hash) {
1486 		if (inode->i_sb == sb &&
1487 		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
1488 		    test(inode, data))
1489 			return inode;
1490 	}
1491 	return NULL;
1492 }
1493 EXPORT_SYMBOL(find_inode_rcu);
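
/*
 * Illustrative sketch (not part of this file, all names hypothetical):
 * a caller pins the inode with igrab() before leaving the RCU read-side
 * critical section, since @test itself may not take a reference.
 */
static int example_rcu_test(struct inode *inode, void *data)
{
	return inode->i_ino == *(unsigned long *)data;
}

static struct inode *example_rcu_lookup(struct super_block *sb,
					unsigned long ino)
{
	struct inode *inode;

	rcu_read_lock();
	inode = find_inode_rcu(sb, ino, example_rcu_test, &ino);
	if (inode)
		inode = igrab(inode);	/* pin before dropping RCU */
	rcu_read_unlock();
	return inode;
}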
1494 
1495 /**
1496  * find_inode_by_ino_rcu - Find an inode in the inode cache
1497  * @sb:		Super block of file system to search
1498  * @ino:	The inode number to match
1499  *
1500  * Search for the inode specified by @ino and @sb in the inode cache.
1501  * An inode matches if its inode number and super block match @ino and
1502  * @sb and it is not in the process of being freed.
1503  *
1504  * If successful, this will return the matching inode and NULL otherwise.
1505  *
1506  * No reference is taken on the returned inode, so it is only guaranteed
1507  * to remain valid for as long as the RCU read lock is held.
1508  *
1509  * As with any RCU read-side critical section, the caller is not
1510  * permitted to sleep.
1511  *
1512  * The caller must hold the RCU read lock.
1513  */
1514 struct inode *find_inode_by_ino_rcu(struct super_block *sb,
1515 				    unsigned long ino)
1516 {
1517 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1518 	struct inode *inode;
1519 
1520 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1521 			 "suspicious find_inode_by_ino_rcu() usage");
1522 
1523 	hlist_for_each_entry_rcu(inode, head, i_hash) {
1524 		if (inode->i_ino == ino &&
1525 		    inode->i_sb == sb &&
1526 		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
1527 		    return inode;
1528 	}
1529 	return NULL;
1530 }
1531 EXPORT_SYMBOL(find_inode_by_ino_rcu);
1532 
1533 int insert_inode_locked(struct inode *inode)
1534 {
1535 	struct super_block *sb = inode->i_sb;
1536 	ino_t ino = inode->i_ino;
1537 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
1538 
1539 	while (1) {
1540 		struct inode *old = NULL;
1541 		spin_lock(&inode_hash_lock);
1542 		hlist_for_each_entry(old, head, i_hash) {
1543 			if (old->i_ino != ino)
1544 				continue;
1545 			if (old->i_sb != sb)
1546 				continue;
1547 			spin_lock(&old->i_lock);
1548 			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
1549 				spin_unlock(&old->i_lock);
1550 				continue;
1551 			}
1552 			break;
1553 		}
1554 		if (likely(!old)) {
1555 			spin_lock(&inode->i_lock);
1556 			inode->i_state |= I_NEW | I_CREATING;
1557 			hlist_add_head_rcu(&inode->i_hash, head);
1558 			spin_unlock(&inode->i_lock);
1559 			spin_unlock(&inode_hash_lock);
1560 			return 0;
1561 		}
1562 		if (unlikely(old->i_state & I_CREATING)) {
1563 			spin_unlock(&old->i_lock);
1564 			spin_unlock(&inode_hash_lock);
1565 			return -EBUSY;
1566 		}
1567 		__iget(old);
1568 		spin_unlock(&old->i_lock);
1569 		spin_unlock(&inode_hash_lock);
1570 		wait_on_inode(old);
1571 		if (unlikely(!inode_unhashed(old))) {
1572 			iput(old);
1573 			return -EBUSY;
1574 		}
1575 		iput(old);
1576 	}
1577 }
1578 EXPORT_SYMBOL(insert_inode_locked);
1579 
1580 int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1581 		int (*test)(struct inode *, void *), void *data)
1582 {
1583 	struct inode *old;
1584 
1585 	inode->i_state |= I_CREATING;
1586 	old = inode_insert5(inode, hashval, test, NULL, data);
1587 
1588 	if (old != inode) {
1589 		iput(old);
1590 		return -EBUSY;
1591 	}
1592 	return 0;
1593 }
1594 EXPORT_SYMBOL(insert_inode_locked4);
1595 
1596 
1597 int generic_delete_inode(struct inode *inode)
1598 {
1599 	return 1;
1600 }
1601 EXPORT_SYMBOL(generic_delete_inode);
1602 
1603 /*
1604  * Called when we're dropping the last reference
1605  * to an inode.
1606  *
1607  * Call the FS "drop_inode()" function, defaulting to
1608  * the legacy UNIX filesystem behaviour.  If it tells
1609  * us to evict the inode, do so.  Otherwise, retain the inode
1610  * in the cache if the fs is alive, and sync and evict it if the fs
1611  * is shutting down.
1612  */
1613 static void iput_final(struct inode *inode)
1614 {
1615 	struct super_block *sb = inode->i_sb;
1616 	const struct super_operations *op = inode->i_sb->s_op;
1617 	unsigned long state;
1618 	int drop;
1619 
1620 	WARN_ON(inode->i_state & I_NEW);
1621 
1622 	if (op->drop_inode)
1623 		drop = op->drop_inode(inode);
1624 	else
1625 		drop = generic_drop_inode(inode);
1626 
1627 	if (!drop &&
1628 	    !(inode->i_state & I_DONTCACHE) &&
1629 	    (sb->s_flags & SB_ACTIVE)) {
1630 		inode_add_lru(inode);
1631 		spin_unlock(&inode->i_lock);
1632 		return;
1633 	}
1634 
1635 	state = inode->i_state;
1636 	if (!drop) {
1637 		WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1638 		spin_unlock(&inode->i_lock);
1639 
1640 		write_inode_now(inode, 1);
1641 
1642 		spin_lock(&inode->i_lock);
1643 		state = inode->i_state;
1644 		WARN_ON(state & I_NEW);
1645 		state &= ~I_WILL_FREE;
1646 	}
1647 
1648 	WRITE_ONCE(inode->i_state, state | I_FREEING);
1649 	if (!list_empty(&inode->i_lru))
1650 		inode_lru_list_del(inode);
1651 	spin_unlock(&inode->i_lock);
1652 
1653 	evict(inode);
1654 }
1655 
1656 /**
1657  *	iput	- put an inode
1658  *	@inode: inode to put
1659  *
1660  *	Puts an inode, dropping its usage count. If the inode use count hits
1661  *	zero, the inode is then freed and may also be destroyed.
1662  *
1663  *	Consequently, iput() can sleep.
1664  */
1665 void iput(struct inode *inode)
1666 {
1667 	if (!inode)
1668 		return;
1669 	BUG_ON(inode->i_state & I_CLEAR);
1670 retry:
1671 	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1672 		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1673 			atomic_inc(&inode->i_count);
1674 			spin_unlock(&inode->i_lock);
1675 			trace_writeback_lazytime_iput(inode);
1676 			mark_inode_dirty_sync(inode);
1677 			goto retry;
1678 		}
1679 		iput_final(inode);
1680 	}
1681 }
1682 EXPORT_SYMBOL(iput);
1683 
1684 #ifdef CONFIG_BLOCK
1685 /**
1686  *	bmap	- find a block number in a file
1687  *	@inode:  inode owning the block number being requested
1688  *	@block: pointer containing the block to find
1689  *
1690  *	Replaces the value in ``*block`` with the number of the block on the
1691  *	device that holds the requested block of the file.
1692  *	That is, asked for block 4 of inode 1, the function will replace the
1693  *	4 in ``*block`` with the disk block, relative to the start of the
1694  *	disk, that holds that block of the file.
1695  *
1696  *	Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
1697  *	hole, returns 0 and ``*block`` is also set to 0.
1698  */
1699 int bmap(struct inode *inode, sector_t *block)
1700 {
1701 	if (!inode->i_mapping->a_ops->bmap)
1702 		return -EINVAL;
1703 
1704 	*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
1705 	return 0;
1706 }
1707 EXPORT_SYMBOL(bmap);
1708 #endif
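
#ifdef CONFIG_BLOCK
/*
 * Illustrative sketch (not part of this file, function name hypothetical):
 * mapping logical block 4 of a file to its on-disk block with bmap().
 */
static sector_t example_block_of(struct inode *inode)
{
	sector_t block = 4;		/* logical block within the file */

	if (bmap(inode, &block))
		return 0;		/* filesystem has no ->bmap */
	return block;			/* 0 also means a hole */
}
#endif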
1709 
1710 /*
1711  * With relative atime, only update atime if the previous atime is
1712  * earlier than either the ctime or mtime or if at least a day has
1713  * passed since the last atime update.
1714  */
1715 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1716 			     struct timespec64 now)
1717 {
1718 
1719 	if (!(mnt->mnt_flags & MNT_RELATIME))
1720 		return 1;
1721 	/*
1722 	 * Is mtime younger than atime? If yes, update atime:
1723 	 */
1724 	if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1725 		return 1;
1726 	/*
1727 	 * Is ctime younger than atime? If yes, update atime:
1728 	 */
1729 	if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1730 		return 1;
1731 
1732 	/*
1733 	 * Is the previous atime value older than a day? If yes,
1734 	 * update atime:
1735 	 */
1736 	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1737 		return 1;
1738 	/*
1739 	 * Good, we can skip the atime update:
1740 	 */
1741 	return 0;
1742 }
1743 
1744 int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1745 {
1746 	int iflags = I_DIRTY_TIME;
1747 	bool dirty = false;
1748 
1749 	if (flags & S_ATIME)
1750 		inode->i_atime = *time;
1751 	if (flags & S_VERSION)
1752 		dirty = inode_maybe_inc_iversion(inode, false);
1753 	if (flags & S_CTIME)
1754 		inode->i_ctime = *time;
1755 	if (flags & S_MTIME)
1756 		inode->i_mtime = *time;
1757 	if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
1758 	    !(inode->i_sb->s_flags & SB_LAZYTIME))
1759 		dirty = true;
1760 
1761 	if (dirty)
1762 		iflags |= I_DIRTY_SYNC;
1763 	__mark_inode_dirty(inode, iflags);
1764 	return 0;
1765 }
1766 EXPORT_SYMBOL(generic_update_time);
1767 
1768 /*
1769  * This does the actual work of updating an inode's time or version.  The
1770  * caller must have called mnt_want_write() before calling this.
1771  */
1772 static int update_time(struct inode *inode, struct timespec64 *time, int flags)
1773 {
1774 	if (inode->i_op->update_time)
1775 		return inode->i_op->update_time(inode, time, flags);
1776 	return generic_update_time(inode, time, flags);
1777 }
1778 
1779 /**
1780  *	touch_atime	-	update the access time
1781  *	@path: the &struct path to update
1782  *	@inode: inode to update
1783  *
1784  *	Update the accessed time on an inode and mark it for writeback.
1785  *	This function automatically handles read only file systems and media,
1786  *	as well as the "noatime" flag and inode specific "noatime" markers.
1787  */
1788 bool atime_needs_update(const struct path *path, struct inode *inode)
1789 {
1790 	struct vfsmount *mnt = path->mnt;
1791 	struct timespec64 now;
1792 
1793 	if (inode->i_flags & S_NOATIME)
1794 		return false;
1795 
1796 	/* Atime updates will likely cause i_uid and i_gid to be written
1797 	 * back improperly if their true value is unknown to the vfs.
1798 	 */
1799 	if (HAS_UNMAPPED_ID(inode))
1800 		return false;
1801 
1802 	if (IS_NOATIME(inode))
1803 		return false;
1804 	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1805 		return false;
1806 
1807 	if (mnt->mnt_flags & MNT_NOATIME)
1808 		return false;
1809 	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1810 		return false;
1811 
1812 	now = current_time(inode);
1813 
1814 	if (!relatime_need_update(mnt, inode, now))
1815 		return false;
1816 
1817 	if (timespec64_equal(&inode->i_atime, &now))
1818 		return false;
1819 
1820 	return true;
1821 }
1822 
1823 void touch_atime(const struct path *path)
1824 {
1825 	struct vfsmount *mnt = path->mnt;
1826 	struct inode *inode = d_inode(path->dentry);
1827 	struct timespec64 now;
1828 
1829 	if (!atime_needs_update(path, inode))
1830 		return;
1831 
1832 	if (!sb_start_write_trylock(inode->i_sb))
1833 		return;
1834 
1835 	if (__mnt_want_write(mnt) != 0)
1836 		goto skip_update;
1837 	/*
1838 	 * Filesystems can error out when updating inodes if they need to
1839 	 * allocate new space to modify an inode (as is the case for Btrfs).
1840 	 * But since we touch atime while walking down the path, we really
1841 	 * don't care if we failed to update the atime of the file, so just
1842 	 * ignore the return value.
1843 	 * We may also fail on filesystems that can make parts of the fs
1844 	 * read-only, e.g. subvolumes in Btrfs.
1845 	 */
1846 	now = current_time(inode);
1847 	update_time(inode, &now, S_ATIME);
1848 	__mnt_drop_write(mnt);
1849 skip_update:
1850 	sb_end_write(inode->i_sb);
1851 }
1852 EXPORT_SYMBOL(touch_atime);
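
/*
 * Illustrative sketch (hypothetical, not part of the original file): read
 * paths mark access via the path they resolved, much as the VFS's own
 * file_accessed() helper in <linux/fs.h> does.
 */
static void example_mark_accessed(struct file *file)
{
	if (!(file->f_flags & O_NOATIME))
		touch_atime(&file->f_path);
}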
1853 
1854 /*
1855  * The logic we want is
1856  *
1857  *	if suid or (sgid and xgrp)
1858  *		remove privs
1859  */
1860 int should_remove_suid(struct dentry *dentry)
1861 {
1862 	umode_t mode = d_inode(dentry)->i_mode;
1863 	int kill = 0;
1864 
1865 	/* suid always must be killed */
1866 	if (unlikely(mode & S_ISUID))
1867 		kill = ATTR_KILL_SUID;
1868 
1869 	/*
1870 	 * sgid without any exec bits is just a mandatory locking mark; leave
1871 	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
1872 	 */
1873 	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1874 		kill |= ATTR_KILL_SGID;
1875 
1876 	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1877 		return kill;
1878 
1879 	return 0;
1880 }
1881 EXPORT_SYMBOL(should_remove_suid);
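
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * probing which privilege bits a write would kill.
 */
static void example_check_privs(struct dentry *dentry)
{
	int kill = should_remove_suid(dentry);

	if (kill & ATTR_KILL_SUID)
		pr_debug("setuid bit would be cleared on write\n");
	if (kill & ATTR_KILL_SGID)
		pr_debug("setgid bit would be cleared on write\n");
}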
1882 
1883 /*
1884  * Return mask of changes for notify_change() that need to be done as a
1885  * response to write or truncate. Return 0 if nothing has to be changed.
1886  * Negative value on error (change should be denied).
1887  */
1888 int dentry_needs_remove_privs(struct dentry *dentry)
1889 {
1890 	struct inode *inode = d_inode(dentry);
1891 	int mask = 0;
1892 	int ret;
1893 
1894 	if (IS_NOSEC(inode))
1895 		return 0;
1896 
1897 	mask = should_remove_suid(dentry);
1898 	ret = security_inode_need_killpriv(dentry);
1899 	if (ret < 0)
1900 		return ret;
1901 	if (ret)
1902 		mask |= ATTR_KILL_PRIV;
1903 	return mask;
1904 }
1905 
1906 static int __remove_privs(struct dentry *dentry, int kill)
1907 {
1908 	struct iattr newattrs;
1909 
1910 	newattrs.ia_valid = ATTR_FORCE | kill;
1911 	/*
1912 	 * Note we call this on write, so notify_change will not
1913 	 * encounter any conflicting delegations:
1914 	 */
1915 	return notify_change(dentry, &newattrs, NULL);
1916 }
1917 
1918 /*
1919  * Remove special file privileges (suid, capabilities) when the file is
1920  * written to or truncated.
1921  */
1922 int file_remove_privs(struct file *file)
1923 {
1924 	struct dentry *dentry = file_dentry(file);
1925 	struct inode *inode = file_inode(file);
1926 	int kill;
1927 	int error = 0;
1928 
1929 	/*
1930 	 * Fast path for nothing security related.
1931 	 * As well for non-regular files, e.g. blkdev inodes.
1932 	 * For example, blkdev_write_iter() might get here
1933 	 * trying to remove privs which it is not allowed to.
1934 	 */
1935 	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1936 		return 0;
1937 
1938 	kill = dentry_needs_remove_privs(dentry);
1939 	if (kill < 0)
1940 		return kill;
1941 	if (kill)
1942 		error = __remove_privs(dentry, kill);
1943 	if (!error)
1944 		inode_has_no_xattr(inode);
1945 
1946 	return error;
1947 }
1948 EXPORT_SYMBOL(file_remove_privs);
1949 
1950 /**
1951  *	file_update_time	-	update mtime and ctime
1952  *	@file: file accessed
1953  *
1954  *	Update the mtime and ctime members of an inode and mark the inode
1955  *	for writeback.  Note that this function is meant exclusively for
1956  *	usage in the file write path of filesystems, and filesystems may
1957  *	choose to explicitly ignore updates via this function with the
1958  *	S_NOCMTIME inode flag, e.g. for network filesystems where these
1959  *	timestamps are handled by the server.  This can return an error for
1960  *	filesystems that need to allocate space in order to update an inode.
1961  */
1963 int file_update_time(struct file *file)
1964 {
1965 	struct inode *inode = file_inode(file);
1966 	struct timespec64 now;
1967 	int sync_it = 0;
1968 	int ret;
1969 
1970 	/* First try to exhaust all avenues to not sync */
1971 	if (IS_NOCMTIME(inode))
1972 		return 0;
1973 
1974 	now = current_time(inode);
1975 	if (!timespec64_equal(&inode->i_mtime, &now))
1976 		sync_it = S_MTIME;
1977 
1978 	if (!timespec64_equal(&inode->i_ctime, &now))
1979 		sync_it |= S_CTIME;
1980 
1981 	if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
1982 		sync_it |= S_VERSION;
1983 
1984 	if (!sync_it)
1985 		return 0;
1986 
1987 	/* Finally allowed to write? Takes lock. */
1988 	if (__mnt_want_write_file(file))
1989 		return 0;
1990 
1991 	ret = update_time(inode, &now, sync_it);
1992 	__mnt_drop_write_file(file);
1993 
1994 	return ret;
1995 }
1996 EXPORT_SYMBOL(file_update_time);
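
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * ->page_mkwrite handlers typically refresh mtime/ctime before letting a
 * shared-writable page be dirtied, along these lines.
 */
static void example_prepare_mkwrite(struct vm_fault *vmf)
{
	file_update_time(vmf->vma->vm_file);
}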
1997 
1998 /* Caller must hold the file's inode lock */
1999 int file_modified(struct file *file)
2000 {
2001 	int err;
2002 
2003 	/*
2004 	 * Clear the security bits if the process is not being run by root.
2005 	 * This keeps people from modifying setuid and setgid binaries.
2006 	 */
2007 	err = file_remove_privs(file);
2008 	if (err)
2009 		return err;
2010 
2011 	if (unlikely(file->f_mode & FMODE_NOCMTIME))
2012 		return 0;
2013 
2014 	return file_update_time(file);
2015 }
2016 EXPORT_SYMBOL(file_modified);
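
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * typical ->write_iter prologue.  file_modified() is called with the
 * inode lock held, as required, and bundles file_remove_privs() with
 * file_update_time().
 */
static ssize_t examplefs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = file_modified(iocb->ki_filp);
	if (ret) {
		inode_unlock(inode);
		return ret;
	}
	/* ... perform the actual write of 'from' here ... */
	inode_unlock(inode);
	return 0;
}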
2017 
2018 int inode_needs_sync(struct inode *inode)
2019 {
2020 	if (IS_SYNC(inode))
2021 		return 1;
2022 	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2023 		return 1;
2024 	return 0;
2025 }
2026 EXPORT_SYMBOL(inode_needs_sync);
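
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * honouring synchronous-update semantics after modifying an inode.
 */
static int example_persist(struct inode *inode)
{
	if (inode_needs_sync(inode))
		return write_inode_now(inode, 1);	/* write and wait */
	mark_inode_dirty(inode);
	return 0;
}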
2027 
2028 /*
2029  * If we try to find an inode in the inode hash while it is being
2030  * deleted, we have to wait until the filesystem completes its
2031  * deletion before reporting that it isn't found.  This function waits
2032  * until the deletion _might_ have completed.  Callers are responsible
2033  * to recheck inode state.
2034  *
2035  * It doesn't matter if I_NEW is not set initially, a call to
2036  * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2037  * will DTRT.
2038  */
2039 static void __wait_on_freeing_inode(struct inode *inode)
2040 {
2041 	wait_queue_head_t *wq;
2042 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2043 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
2044 	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2045 	spin_unlock(&inode->i_lock);
2046 	spin_unlock(&inode_hash_lock);
2047 	schedule();
2048 	finish_wait(wq, &wait.wq_entry);
2049 	spin_lock(&inode_hash_lock);
2050 }
2051 
2052 static __initdata unsigned long ihash_entries;
2053 static int __init set_ihash_entries(char *str)
2054 {
2055 	if (!str)
2056 		return 0;
2057 	ihash_entries = simple_strtoul(str, &str, 0);
2058 	return 1;
2059 }
2060 __setup("ihash_entries=", set_ihash_entries);
2061 
2062 /*
2063  * Initialize the waitqueues and inode hash table.
2064  */
2065 void __init inode_init_early(void)
2066 {
2067 	/* If hashes are distributed across NUMA nodes, defer
2068 	 * hash allocation until vmalloc space is available.
2069 	 */
2070 	if (hashdist)
2071 		return;
2072 
2073 	inode_hashtable =
2074 		alloc_large_system_hash("Inode-cache",
2075 					sizeof(struct hlist_head),
2076 					ihash_entries,
2077 					14,
2078 					HASH_EARLY | HASH_ZERO,
2079 					&i_hash_shift,
2080 					&i_hash_mask,
2081 					0,
2082 					0);
2083 }
2084 
2085 void __init inode_init(void)
2086 {
2087 	/* inode slab cache */
2088 	inode_cachep = kmem_cache_create("inode_cache",
2089 					 sizeof(struct inode),
2090 					 0,
2091 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
2092 					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
2093 					 init_once);
2094 
2095 	/* Hash may have been set up in inode_init_early */
2096 	if (!hashdist)
2097 		return;
2098 
2099 	inode_hashtable =
2100 		alloc_large_system_hash("Inode-cache",
2101 					sizeof(struct hlist_head),
2102 					ihash_entries,
2103 					14,
2104 					HASH_ZERO,
2105 					&i_hash_shift,
2106 					&i_hash_mask,
2107 					0,
2108 					0);
2109 }
2110 
2111 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2112 {
2113 	inode->i_mode = mode;
2114 	if (S_ISCHR(mode)) {
2115 		inode->i_fop = &def_chr_fops;
2116 		inode->i_rdev = rdev;
2117 	} else if (S_ISBLK(mode)) {
2118 		inode->i_fop = &def_blk_fops;
2119 		inode->i_rdev = rdev;
2120 	} else if (S_ISFIFO(mode))
2121 		inode->i_fop = &pipefifo_fops;
2122 	else if (S_ISSOCK(mode))
2123 		;	/* leave it no_open_fops */
2124 	else
2125 		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
2126 				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
2127 				  inode->i_ino);
2128 }
2129 EXPORT_SYMBOL(init_special_inode);
2130 
2131 /**
2132  * inode_init_owner - Initialize uid, gid and mode for a new inode according to POSIX standards
2133  * @inode: New inode
2134  * @dir: Directory inode
2135  * @mode: mode of the new inode
2136  */
2137 void inode_init_owner(struct inode *inode, const struct inode *dir,
2138 			umode_t mode)
2139 {
2140 	inode->i_uid = current_fsuid();
2141 	if (dir && dir->i_mode & S_ISGID) {
2142 		inode->i_gid = dir->i_gid;
2143 
2144 		/* Directories are special, and always inherit S_ISGID */
2145 		if (S_ISDIR(mode))
2146 			mode |= S_ISGID;
2147 		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
2148 			 !in_group_p(inode->i_gid) &&
2149 			 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
2150 			mode &= ~S_ISGID;
2151 	} else
2152 		inode->i_gid = current_fsgid();
2153 	inode->i_mode = mode;
2154 }
2155 EXPORT_SYMBOL(inode_init_owner);
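
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * simple in-memory filesystem's ->mknod, combining inode_init_owner() and
 * init_special_inode() in the spirit of ramfs.
 */
static int examplefs_mknod(struct inode *dir, struct dentry *dentry,
			   umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOSPC;
	inode->i_ino = get_next_ino();
	inode_init_owner(inode, dir, mode);
	init_special_inode(inode, inode->i_mode, rdev);
	d_instantiate(dentry, inode);
	return 0;
}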
2156 
2157 /**
2158  * inode_owner_or_capable - check current task permissions to inode
2159  * @inode: inode being checked
2160  *
2161  * Return true if current either has CAP_FOWNER in a namespace with the
2162  * inode owner uid mapped, or owns the file.
2163  */
2164 bool inode_owner_or_capable(const struct inode *inode)
2165 {
2166 	struct user_namespace *ns;
2167 
2168 	if (uid_eq(current_fsuid(), inode->i_uid))
2169 		return true;
2170 
2171 	ns = current_user_ns();
2172 	if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
2173 		return true;
2174 	return false;
2175 }
2176 EXPORT_SYMBOL(inode_owner_or_capable);
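
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * usual permission gate before an ioctl may change inode attributes.
 */
static int example_may_change_attrs(struct inode *inode)
{
	return inode_owner_or_capable(inode) ? 0 : -EPERM;
}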
2177 
2178 /*
2179  * Direct i/o helper functions
2180  */
2181 static void __inode_dio_wait(struct inode *inode)
2182 {
2183 	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2184 	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2185 
2186 	do {
2187 		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
2188 		if (atomic_read(&inode->i_dio_count))
2189 			schedule();
2190 	} while (atomic_read(&inode->i_dio_count));
2191 	finish_wait(wq, &q.wq_entry);
2192 }
2193 
2194 /**
2195  * inode_dio_wait - wait for outstanding DIO requests to finish
2196  * @inode: inode to wait for
2197  *
2198  * Waits for all pending direct I/O requests to finish so that we can
2199  * proceed with a truncate or equivalent operation.
2200  *
2201  * Must be called under a lock that serializes taking new references
2202  * to i_dio_count, usually inode->i_mutex.
2203  */
2204 void inode_dio_wait(struct inode *inode)
2205 {
2206 	if (atomic_read(&inode->i_dio_count))
2207 		__inode_dio_wait(inode);
2208 }
2209 EXPORT_SYMBOL(inode_dio_wait);
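
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * draining direct I/O before shrinking a file, under the inode lock that
 * serializes new i_dio_count references.
 */
static void example_shrink(struct inode *inode, loff_t newsize)
{
	inode_dio_wait(inode);		/* no DIO can race with us now */
	truncate_setsize(inode, newsize);
}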
2210 
2211 /*
2212  * inode_set_flags - atomically set some inode flags
2213  *
2214  * Note: the caller should be holding i_mutex, or else be sure that
2215  * they have exclusive access to the inode structure (i.e., while the
2216  * inode is being instantiated).  The reason for the cmpxchg() loop
2217  * (which wouldn't be necessary if all code paths that modify i_flags
2218  * actually followed this rule) is that there is at least one code
2219  * path which doesn't today, so we use cmpxchg() out of an abundance
2220  * of caution.
2221  *
2222  * In the long run, i_mutex is overkill, and we should probably look
2223  * at using the i_lock spinlock to protect i_flags, and then make sure
2224  * it is so documented in include/linux/fs.h and that all code follows
2225  * the locking convention!!
2226  */
2227 void inode_set_flags(struct inode *inode, unsigned int flags,
2228 		     unsigned int mask)
2229 {
2230 	WARN_ON_ONCE(flags & ~mask);
2231 	set_mask_bits(&inode->i_flags, mask, flags);
2232 }
2233 EXPORT_SYMBOL(inode_set_flags);
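
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * flip one flag within a mask without disturbing concurrent updaters.
 */
static void example_set_sync_flag(struct inode *inode, bool sync)
{
	inode_set_flags(inode, sync ? S_SYNC : 0, S_SYNC);
}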
2234 
2235 void inode_nohighmem(struct inode *inode)
2236 {
2237 	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2238 }
2239 EXPORT_SYMBOL(inode_nohighmem);
2240 
2241 /**
2242  * timestamp_truncate - Truncate timespec to a granularity
2243  * @t: Timespec
2244  * @inode: inode being updated
2245  *
2246  * Truncate a timespec to the granularity supported by the fs
2247  * containing the inode.  Always rounds down.  The granularity,
2248  * sb->s_time_gran, must not be 0 nor greater than NSEC_PER_SEC (10^9 ns).
2249  */
2250 struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2251 {
2252 	struct super_block *sb = inode->i_sb;
2253 	unsigned int gran = sb->s_time_gran;
2254 
2255 	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
2256 	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
2257 		t.tv_nsec = 0;
2258 
2259 	/* Avoid division in the common cases 1 ns and 1 s. */
2260 	if (gran == 1)
2261 		; /* nothing */
2262 	else if (gran == NSEC_PER_SEC)
2263 		t.tv_nsec = 0;
2264 	else if (gran > 1 && gran < NSEC_PER_SEC)
2265 		t.tv_nsec -= t.tv_nsec % gran;
2266 	else
2267 		WARN(1, "invalid file time granularity: %u", gran);
2268 	return t;
2269 }
2270 EXPORT_SYMBOL(timestamp_truncate);
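
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * assuming the filesystem has sb->s_time_gran == 1000 (microsecond
 * resolution), tv_nsec = 123456789 truncates to 123456000.
 */
static void example_truncate_ts(struct inode *inode)
{
	struct timespec64 t = { .tv_sec = 0, .tv_nsec = 123456789 };

	t = timestamp_truncate(t, inode);	/* rounds tv_nsec down */
	pr_debug("%lld.%09ld\n", (long long)t.tv_sec, t.tv_nsec);
}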
2271 
2272 /**
2273  * current_time - Return FS time
2274  * @inode: inode.
2275  *
2276  * Return the current time truncated to the time granularity supported by
2277  * the fs.
2278  *
2279  * Note that @inode must not be NULL.  If inode->i_sb is not set, the
2280  * function warns and returns the time without truncation.
2281  */
2282 struct timespec64 current_time(struct inode *inode)
2283 {
2284 	struct timespec64 now;
2285 
2286 	ktime_get_coarse_real_ts64(&now);
2287 
2288 	if (unlikely(!inode->i_sb)) {
2289 		WARN(1, "current_time() called with uninitialized super_block in the inode");
2290 		return now;
2291 	}
2292 
2293 	return timestamp_truncate(now, inode);
2294 }
2295 EXPORT_SYMBOL(current_time);
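
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * usual idiom for stamping an inode after a modification.
 */
static void example_touch_cmtime(struct inode *inode)
{
	inode->i_mtime = inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}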
2296 
2297 /*
2298  * Generic function to check FS_IOC_SETFLAGS values and reject any invalid
2299  * configurations.
2300  *
2301  * Note: the caller should be holding i_mutex, or else be sure that they have
2302  * exclusive access to the inode structure.
2303  */
2304 int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
2305 			     unsigned int flags)
2306 {
2307 	/*
2308 	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by a
2309 	 * task with the relevant capability (CAP_LINUX_IMMUTABLE).
2310 	 *
2311 	 * This test looks nicer. Thanks to Pauline Middelink
2312 	 */
2313 	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
2314 	    !capable(CAP_LINUX_IMMUTABLE))
2315 		return -EPERM;
2316 
2317 	return fscrypt_prepare_setflags(inode, oldflags, flags);
2318 }
2319 EXPORT_SYMBOL(vfs_ioc_setflags_prepare);
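
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * shape of a filesystem's FS_IOC_SETFLAGS handler, with oldflags already
 * derived from the inode by a filesystem-specific helper.
 */
static int examplefs_setflags(struct inode *inode, unsigned int oldflags,
			      unsigned int flags)
{
	int err;

	/* the inode lock must be held, per the note above */
	err = vfs_ioc_setflags_prepare(inode, oldflags, flags);
	if (err)
		return err;
	/* ... translate and apply the new flags to the inode ... */
	return 0;
}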
2320 
2321 /*
2322  * Generic function to check FS_IOC_FSSETXATTR values and reject any invalid
2323  * configurations.
2324  *
2325  * Note: the caller should be holding i_mutex, or else be sure that they have
2326  * exclusive access to the inode structure.
2327  */
2328 int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
2329 			     struct fsxattr *fa)
2330 {
2331 	/*
2332 	 * Can't modify an immutable/append-only file unless we have
2333 	 * appropriate permission.
2334 	 */
2335 	if ((old_fa->fsx_xflags ^ fa->fsx_xflags) &
2336 			(FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND) &&
2337 	    !capable(CAP_LINUX_IMMUTABLE))
2338 		return -EPERM;
2339 
2340 	/*
2341 	 * Project Quota ID state is only allowed to change from within the init
2342 	 * namespace. Enforce that restriction only if we are trying to change
2343 	 * the quota ID state. Everything else is allowed in user namespaces.
2344 	 */
2345 	if (current_user_ns() != &init_user_ns) {
2346 		if (old_fa->fsx_projid != fa->fsx_projid)
2347 			return -EINVAL;
2348 		if ((old_fa->fsx_xflags ^ fa->fsx_xflags) &
2349 				FS_XFLAG_PROJINHERIT)
2350 			return -EINVAL;
2351 	}
2352 
2353 	/* Check extent size hints. */
2354 	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
2355 		return -EINVAL;
2356 
2357 	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
2358 			!S_ISDIR(inode->i_mode))
2359 		return -EINVAL;
2360 
2361 	if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
2362 	    !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
2363 		return -EINVAL;
2364 
2365 	/*
2366 	 * It is only valid to set the DAX flag on regular files and
2367 	 * directories.
2368 	 */
2369 	if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
2370 	    !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
2371 		return -EINVAL;
2372 
2373 	/* Extent size hints of zero turn off the flags. */
2374 	if (fa->fsx_extsize == 0)
2375 		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
2376 	if (fa->fsx_cowextsize == 0)
2377 		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;
2378 
2379 	return 0;
2380 }
2381 EXPORT_SYMBOL(vfs_ioc_fssetxattr_check);
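
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * shape of a filesystem's FS_IOC_FSSETXATTR handler, validating the new
 * attributes against the old before applying them.
 */
static int examplefs_fssetxattr(struct inode *inode,
				const struct fsxattr *old_fa,
				struct fsxattr *fa)
{
	int err;

	err = vfs_ioc_fssetxattr_check(inode, old_fa, fa);
	if (err)
		return err;
	/* ... copy the checked attributes into the on-disk inode ... */
	return 0;
}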
2382