// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering.
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic64_set(&inode->i_sequence, 0);
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->i_ino = 0;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	atomic_set(&mapping->nr_thps, 0);
#endif
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	init_rwsem(&mapping->invalidate_lock);
	lockdep_set_class_and_name(&mapping->invalidate_lock,
				   &sb->s_type->invalidate_lock_key,
				   "mapping.invalidate_lock");
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (inode->free_inode)
		inode->free_inode(inode);
	else
		free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
	const struct super_operations *ops = sb->s_op;
	struct inode *inode;

	if (ops->alloc_inode)
		inode = ops->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (ops->destroy_inode) {
			ops->destroy_inode(inode);
			if (!ops->free_inode)
				return NULL;
		}
		inode->free_inode = ops->free_inode;
		i_callback(&inode->i_rcu);
		return NULL;
	}

	return inode;
}

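/*
 * For illustration only (not part of this file): a filesystem that embeds
 * the VFS inode in its own inode structure typically pairs ->alloc_inode
 * with ->free_inode along these lines. The "foofs" names and the cache
 * variable are hypothetical:
 *
 *	static struct kmem_cache *foofs_inode_cachep;
 *
 *	static struct inode *foofs_alloc_inode(struct super_block *sb)
 *	{
 *		struct foofs_inode_info *fi;
 *
 *		fi = kmem_cache_alloc(foofs_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 *
 *	static void foofs_free_inode(struct inode *inode)
 *	{
 *		struct foofs_inode_info *fi =
 *			container_of(inode, struct foofs_inode_info,
 *				     vfs_inode);
 *
 *		kmem_cache_free(foofs_inode_cachep, fi);
 *	}
 *
 * alloc_inode() above then runs inode_init_always() on whatever
 * ->alloc_inode returned, and i_callback()/destroy_inode() invoke
 * ->free_inode from RCU context.
 */
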
void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
	const struct super_operations *ops = inode->i_sb->s_op;

	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (ops->destroy_inode) {
		ops->destroy_inode(inode);
		if (!ops->free_inode)
			return;
	}
	inode->free_inode = ops->free_inode;
	call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

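/*
 * Illustrative use of the nlink helpers (hypothetical "foofs" code, not
 * from this file): an unlink method drops the victim's link count rather
 * than touching __i_nlink directly, so s_remove_count stays accurate:
 *
 *	static int foofs_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		inode->i_ctime = dir->i_ctime = dir->i_mtime =
 *						current_time(inode);
 *		drop_nlink(inode);	// may bump s_remove_count
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */
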
/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

static void __address_space_init_once(struct address_space *mapping)
{
	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done once, because
 * the fields are idempotent across use of the inode, so we can let
 * the slab cache know about them.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	__address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

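/*
 * The slab constructor pattern, for illustration: init_once() above does
 * this for the generic inode_cachep, and a filesystem with its own inode
 * cache would do the same for its containing structure (hypothetical
 * "foofs" names):
 *
 *	static void foofs_init_once(void *foo)
 *	{
 *		struct foofs_inode_info *fi = foo;
 *
 *		inode_init_once(&fi->vfs_inode);
 *	}
 *
 *	foofs_inode_cachep = kmem_cache_create("foofs_inode_cache",
 *				sizeof(struct foofs_inode_info), 0,
 *				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
 *				SLAB_ACCOUNT, foofs_init_once);
 *
 * The constructor runs once per slab object, not once per allocation,
 * which is why only fields that stay idempotent across reuse belong in
 * inode_init_once().
 */
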
/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void __inode_add_lru(struct inode *inode, bool rotate)
{
	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
		return;
	if (atomic_read(&inode->i_count))
		return;
	if (!(inode->i_sb->s_flags & SB_ACTIVE))
		return;
	if (!mapping_shrinkable(&inode->i_data))
		return;

	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else if (rotate)
		inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	__inode_add_lru(inode, false);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head_rcu(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock's inode hash.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init_rcu(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

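/*
 * Most filesystems hash by inode number and use the inline wrapper from
 * <linux/fs.h> rather than calling __insert_inode_hash() directly:
 *
 *	static inline void insert_inode_hash(struct inode *inode)
 *	{
 *		__insert_inode_hash(inode, inode->i_ino);
 *	}
 *
 * remove_inode_hash() is the matching wrapper around __remove_inode_hash();
 * it skips inodes that were never hashed.
 */
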
void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle the i_pages lock here because reclaim can be in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);
	/*
	 * Almost always, mapping_empty(&inode->i_data) here; but there are
	 * two known and long-standing ways in which nodes may get left behind
	 * (when deep radix-tree node allocation failed partway; or when THP
	 * collapse_file() failed). Until those two known cases are cleaned up,
	 * or a cleanup function is called here, do not BUG_ON(!mapping_empty),
	 * nor even WARN_ON(!mapping_empty).
	 */
	xa_unlock_irq(&inode->i_data.i_pages);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

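/*
 * A typical ->evict_inode() mirrors the default branch in evict() above,
 * with the filesystem's own deletion work in the middle (a sketch with a
 * hypothetical "foofs" helper):
 *
 *	static void foofs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink && !is_bad_inode(inode))
 *			foofs_free_on_disk_inode(inode);
 *		clear_inode(inode);
 *	}
 *
 * The invariants checked in clear_inode() dictate the order: pages must be
 * gone before clear_inode(), and clear_inode() must be the last thing done
 * to the inode's state.
 */
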
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having SB_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * We are inverting the lru lock/inode->i_lock here, so use a
	 * trylock. If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Inodes can get referenced, redirtied, or repopulated while
	 * they're already on the LRU, and this can make them
	 * unreclaimable for a while. Remove them lazily here; iput,
	 * sync, or the last page cache deletion will requeue them.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED) ||
	    !mapping_shrinkable(&inode->i_data)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* Recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	/*
	 * On highmem systems, mapping_shrinkable() permits dropping
	 * page cache in order to free up struct inodes: lowmem might
	 * be under pressure before the cache inside the highmem zone.
	 */
	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

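/*
 * For context, a hedged sketch (the exact accounting lives in the
 * superblock shrinker in fs/super.c): the shrinker splits sc->nr_to_scan
 * between the dentry and inode LRUs and then calls into this file roughly
 * as:
 *
 *	sc->nr_to_scan = ...the inode LRU's proportional share...;
 *	freed += prune_icache_sb(sb, sc);
 */
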
static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode will not be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

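/*
 * Illustrative caller (hypothetical pseudo-filesystem code, not from this
 * file): a simple in-memory filesystem creates inodes with new_inode() and
 * hands out inode numbers from get_next_ino():
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (!inode)
 *		return NULL;
 *	inode->i_ino = get_next_ino();
 *	inode->i_mode = mode;
 *	inode->i_atime = inode->i_mtime = inode->i_ctime =
 *						current_time(inode);
 *	return inode;
 */
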
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

void discard_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
	iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_lock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_unlock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode:	pre-allocated inode to use for insert to cache
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() for callers that don't want to fail on memory
 * allocation of inode.
 *
 * If the inode is not in cache, insert the pre-allocated inode to cache and
 * return it locked, hashed, and with the I_NEW flag set. The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
			    int (*test)(struct inode *, void *),
			    int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	struct inode *old;
	bool creating = inode->i_state & I_CREATING;

again:
	spin_lock(&inode_hash_lock);
	old = find_inode(inode->i_sb, head, test, data);
	if (unlikely(old)) {
		/*
		 * Uhhuh, somebody else created the same inode under us.
		 * Use the old inode instead of the preallocated one.
		 */
		spin_unlock(&inode_hash_lock);
		if (IS_ERR(old))
			return NULL;
		wait_on_inode(old);
		if (unlikely(inode_unhashed(old))) {
			iput(old);
			goto again;
		}
		return old;
	}

	if (set && unlikely(set(inode, data))) {
		inode = NULL;
		goto unlock;
	}

	/*
	 * Return the locked inode with I_NEW set, the
	 * caller is responsible for filling in the contents
	 */
	spin_lock(&inode->i_lock);
	inode->i_state |= I_NEW;
	hlist_add_head_rcu(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);
	if (!creating)
		inode_sb_list_add(inode);
unlock:
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(inode_insert5);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5(sb, hashval, test, data);

	if (!inode) {
		struct inode *new = alloc_inode(sb);

		if (new) {
			new->i_state = 0;
			inode = inode_insert5(new, hashval, test, set, data);
			if (unlikely(inode != new))
				destroy_inode(new);
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget5_locked);

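/*
 * Sketch of the @test/@set pair a filesystem might pass in (hypothetical
 * "foofs" code; FOOFS_I() would map a VFS inode to its containing
 * structure). Neither callback may sleep, per the note above:
 *
 *	static int foofs_test(struct inode *inode, void *data)
 *	{
 *		return FOOFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int foofs_set(struct inode *inode, void *data)
 *	{
 *		FOOFS_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, (unsigned long)object_id, foofs_test,
 *			     foofs_set, &object_id);
 */
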
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		if (IS_ERR(old))
			return NULL;
		inode = old;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);

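/*
 * The canonical iget_locked() calling pattern, for illustration only
 * (a hypothetical foofs_read_inode() does the filesystem-specific fill):
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *	int err;
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		// already in cache and set up
 *
 *	err = foofs_read_inode(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 *
 * iget_failed() (fs/bad_inode.c) marks the inode bad and drops it, so
 * concurrent waiters in wait_on_inode() never see a half-built inode.
 */
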
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	hlist_for_each_entry_rcu(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb)
			return 0;
	}
	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	rcu_read_lock();
	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode;
again:
	inode = ilookup5_nowait(sb, hashval, test, data);
	if (inode) {
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;
again:
	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		if (IS_ERR(inode))
			return NULL;
		wait_on_inode(inode);
		if (unlikely(inode_unhashed(inode))) {
			iput(inode);
			goto again;
		}
	}
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() for when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

/**
 * find_inode_rcu - find an inode in the inode cache
 * @sb:		Super block of file system to search
 * @hashval:	Key to hash
 * @test:	Function to test match on an inode
 * @data:	Data for test function
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * where the helper function @test will return 0 if the inode does not match
 * and 1 if it does. The @test function must be responsible for taking the
 * i_lock spin_lock and checking i_state for an inode being freed or being
 * initialized.
 *
 * If successful, this will return the inode for which the @test function
 * returned 1 and NULL otherwise.
 *
 * The @test function is not permitted to take a ref on any inode presented.
 * It is also not permitted to sleep.
 *
 * The caller must hold the RCU read lock.
 */
struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
			     int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
		    test(inode, data))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_rcu);

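/*
 * Hedged usage sketch: an RCU-side caller may only peek at the inode it
 * gets back; taking a reference or sleeping is not allowed in @test or on
 * the result while only the RCU read lock is held:
 *
 *	rcu_read_lock();
 *	inode = find_inode_rcu(sb, hashval, foofs_test, &key);
 *	if (inode)
 *		... read only fields that are safe under RCU ...
 *	rcu_read_unlock();
 */
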
/**
 * find_inode_by_ino_rcu - Find an inode in the inode cache
 * @sb:		Super block of file system to search
 * @ino:	The inode number to match
 *
 * Search for the inode specified by @ino in the inode cache. The inode is
 * returned only if its inode number and super block match and it is not
 * currently being freed.
 *
 * If successful, this will return the matching inode and NULL otherwise.
 *
 * The caller is not permitted to take a ref on the returned inode under
 * RCU alone, nor to sleep, and must hold the RCU read lock.
 */
struct inode *find_inode_by_ino_rcu(struct super_block *sb,
				    unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "suspicious find_inode_by_ino_rcu() usage");

	hlist_for_each_entry_rcu(inode, head, i_hash) {
		if (inode->i_ino == ino &&
		    inode->i_sb == sb &&
		    !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
			return inode;
	}
	return NULL;
}
EXPORT_SYMBOL(find_inode_by_ino_rcu);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW | I_CREATING;
			hlist_add_head_rcu(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		if (unlikely(old->i_state & I_CREATING)) {
			spin_unlock(&old->i_lock);
			spin_unlock(&inode_hash_lock);
			return -EBUSY;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *old;

	inode->i_state |= I_CREATING;
	old = inode_insert5(inode, hashval, test, NULL, data);

	if (old != inode) {
		iput(old);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(insert_inode_locked4);

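/*
 * Typical create-side usage, as a sketch: a disk filesystem allocates a
 * fresh on-disk inode number and publishes the inode with I_NEW |
 * I_CREATING before initialising it. The error policy is the caller's
 * choice; insert_inode_locked() itself only reports -EBUSY:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		// number already live in the cache: treat as corruption
 *		err = -EIO;
 *		goto fail;
 *	}
 *	... fill in the inode ...
 *	unlock_new_inode(inode);
 */
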
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict inode, do so. Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	unsigned long state;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop &&
	    !(inode->i_state & I_DONTCACHE) &&
	    (sb->s_flags & SB_ACTIVE)) {
		__inode_add_lru(inode, true);
		spin_unlock(&inode->i_lock);
		return;
	}

	state = inode->i_state;
	if (!drop) {
		WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
		spin_unlock(&inode->i_lock);

		write_inode_now(inode, 1);

		spin_lock(&inode->i_lock);
		state = inode->i_state;
		WARN_ON(state & I_NEW);
		state &= ~I_WILL_FREE;
	}

	WRITE_ONCE(inode->i_state, state | I_FREEING);
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

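/*
 * Illustration of the ->drop_inode() convention used above: a nonzero
 * return means "evict now", so a filesystem that never wants unreferenced
 * inodes cached can simply plug in generic_delete_inode():
 *
 *	static const struct super_operations foofs_sops = {
 *		.drop_inode	= generic_delete_inode,	// never cache
 *	};
 *
 * Leaving .drop_inode NULL selects generic_drop_inode(), which asks for
 * eviction only when the inode is unhashed or its link count is zero.
 */
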
#ifdef CONFIG_BLOCK
/**
 * bmap - find a block number in a file
 * @inode: inode owning the block number being requested
 * @block: pointer containing the block to find
 *
 * Replaces the value in ``*block`` with the number of the block on the
 * backing device that holds the corresponding block of the file. That is,
 * asked for block 4 of inode 1, the function will replace the 4 in
 * ``*block`` with the disk block, relative to the start of the disk, that
 * holds that block of the file.
 *
 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a
 * hole, returns 0 and ``*block`` is also set to 0.
 */
int bmap(struct inode *inode, sector_t *block)
{
	if (!inode->i_mapping->a_ops->bmap)
		return -EINVAL;

	*block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
	return 0;
}
EXPORT_SYMBOL(bmap);
#endif

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec64 now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
	int dirty_flags = 0;

	if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
		if (flags & S_ATIME)
			inode->i_atime = *time;
		if (flags & S_CTIME)
			inode->i_ctime = *time;
		if (flags & S_MTIME)
			inode->i_mtime = *time;

		if (inode->i_sb->s_flags & SB_LAZYTIME)
			dirty_flags |= I_DIRTY_TIME;
		else
			dirty_flags |= I_DIRTY_SYNC;
	}

	if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
		dirty_flags |= I_DIRTY_SYNC;

	__mark_inode_dirty(inode, dirty_flags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/**
 * atime_needs_update - check whether the access time needs updating
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Decide whether the access time on an inode should be updated, taking
 * read-only filesystems and media into account, as well as the "noatime"
 * flag, inode-specific "noatime" markers and the relative atime policy.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec64 now;

	if (inode->i_flags & S_NOATIME)
		return false;

	/*
	 * Atime updates will likely cause i_uid and i_gid to be written
	 * back improperly if their true value is unknown to the vfs.
	 */
	if (HAS_UNMAPPED_ID(mnt_user_ns(mnt), inode))
		return false;

	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_time(inode);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec64_equal(&inode->i_atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec64 now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * Filesystems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_time(inode);
	inode_update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return the mask of changes for notify_change() that need to be done as a
 * response to a write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}

static int __remove_privs(struct user_namespace *mnt_userns,
			  struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(mnt_userns, dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when the file is
 * written to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = file_inode(file);
	int kill;
	int error = 0;

	/*
	 * Fast path for nothing security related.
	 * As well for non-regular files, e.g. blkdev inodes.
	 * For example, blkdev_write_iter() might get here
	 * trying to remove privs which it is not allowed to.
	 */
	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
		return 0;

	kill = dentry_needs_remove_privs(dentry);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(file_mnt_user_ns(file), dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);
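/*
 * Illustrative sketch, not part of the original file: the canonical order
 * of operations in a write-path prologue. Real filesystems normally use
 * file_modified() below, which combines both steps; this helper is
 * hypothetical.
 */
static int __maybe_unused example_write_prologue(struct file *file)
{
	int err;

	err = file_remove_privs(file);	/* drop suid/sgid/capabilities */
	if (err)
		return err;
	return file_update_time(file);	/* then bump mtime/ctime */
}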
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback. Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function, by setting the S_NOCMTIME
 * inode flag, e.g. for network filesystems where these timestamps are
 * handled by the server. This can return an error for filesystems that
 * need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec64 now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec64_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = inode_update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

/* Caller must hold the file's inode lock */
int file_modified(struct file *file)
{
	int err;

	/*
	 * Clear the security bits if the process is not being run by root.
	 * This keeps people from modifying setuid and setgid binaries.
	 */
	err = file_remove_privs(file);
	if (err)
		return err;

	if (unlikely(file->f_mode & FMODE_NOCMTIME))
		return 0;

	return file_update_time(file);
}
EXPORT_SYMBOL(file_modified);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
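/*
 * Illustrative sketch, not part of the original file: combining the helpers
 * above in a hypothetical synchronous write path. The caller is assumed to
 * hold the inode lock, as file_modified() requires.
 */
static ssize_t __maybe_unused example_sync_write(struct file *file,
						 ssize_t written)
{
	struct inode *inode = file_inode(file);
	int err;

	err = file_modified(file);	/* privileges and timestamps first */
	if (err)
		return err;

	/* ... the actual write producing "written" bytes is elided ... */

	if (inode_needs_sync(inode))	/* S_SYNC or DIRSYNC inode */
		err = write_inode_now(inode, 1);
	return err ? err : written;
}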
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * for rechecking the inode state.
 *
 * It doesn't matter if I_NEW is not set initially; a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wq_entry);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	/*
	 * If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY | HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}

void __init inode_init(void)
{
	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}
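/*
 * Illustrative note, not part of the original file: the hash table sized
 * above defaults to scaling with available memory, but it can be pinned at
 * boot. For example, booting with "ihash_entries=131072" on the kernel
 * command line makes set_ihash_entries() record that value, and
 * alloc_large_system_hash() then sizes the table from it.
 */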
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @mnt_userns: User namespace of the mount the inode was created from
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 *
 * If the inode has been created through an idmapped mount the user namespace
 * of the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before checking
 * permissions and initializing i_uid and i_gid. On non-idmapped mounts or if
 * permission checking is to be performed on the raw inode simply pass
 * init_user_ns.
 */
void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
		      const struct inode *dir, umode_t mode)
{
	inode_fsuid_set(inode, mnt_userns);
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;

		/* Directories are special, and always inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
			 !in_group_p(i_gid_into_mnt(mnt_userns, dir)) &&
			 !capable_wrt_inode_uidgid(mnt_userns, dir, CAP_FSETID))
			mode &= ~S_ISGID;
	} else
		inode_fsgid_set(inode, mnt_userns);
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @mnt_userns: user namespace of the mount the inode was found from
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 *
 * If the inode has been found through an idmapped mount the user namespace
 * of the vfsmount must be passed through @mnt_userns. This function will
 * then take care to map the inode according to @mnt_userns before checking
 * permissions. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode simply pass init_user_ns.
 */
bool inode_owner_or_capable(struct user_namespace *mnt_userns,
			    const struct inode *inode)
{
	kuid_t i_uid;
	struct user_namespace *ns;

	i_uid = i_uid_into_mnt(mnt_userns, inode);
	if (uid_eq(current_fsuid(), i_uid))
		return true;

	ns = current_user_ns();
	if (kuid_has_mapping(ns, i_uid) && ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wq_entry);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
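/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * draining direct I/O before a truncate-style operation. Real filesystems
 * do this inside ->setattr with many more checks; this helper is
 * hypothetical.
 */
static void __maybe_unused example_truncate_prepare(struct inode *inode)
{
	inode_lock(inode);	/* blocks new i_dio_count references */
	inode_dio_wait(inode);	/* waits for in-flight direct I/O to drain */

	/* ... now safe to shrink i_size and free blocks ... */

	inode_unlock(inode);
}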
/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least
 * one code path which doesn't today, so we use cmpxchg() out of an
 * abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	WARN_ON_ONCE(flags & ~mask);
	set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

/**
 * timestamp_truncate - Truncate timespec to a granularity
 * @t: Timespec
 * @inode: inode being updated
 *
 * Truncate a timespec to the granularity supported by the fs
 * containing the inode. Always rounds down. The granularity (gran)
 * must not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned int gran = sb->s_time_gran;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min))
		t.tv_nsec = 0;

	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1)
		; /* nothing */
	else if (gran == NSEC_PER_SEC)
		t.tv_nsec = 0;
	else if (gran > 1 && gran < NSEC_PER_SEC)
		t.tv_nsec -= t.tv_nsec % gran;
	else
		WARN(1, "invalid file time granularity: %u", gran);
	return t;
}
EXPORT_SYMBOL(timestamp_truncate);

/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that @inode must not be NULL. If @inode->i_sb has not been set,
 * the function warns and returns the time without truncation.
 */
struct timespec64 current_time(struct inode *inode)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);

	if (unlikely(!inode->i_sb)) {
		WARN(1, "current_time() called with uninitialized super_block in the inode");
		return now;
	}

	return timestamp_truncate(now, inode);
}
EXPORT_SYMBOL(current_time);
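/*
 * Illustrative sketch, not part of the original file: stamping an inode
 * with the filesystem-granular current time. On a superblock with
 * s_time_gran == NSEC_PER_SEC, current_time() returns tv_nsec == 0 because
 * timestamp_truncate() rounds down. The helper name is hypothetical.
 */
static void __maybe_unused example_stamp_cmtime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);

	inode->i_mtime = now;
	inode->i_ctime = now;
	mark_inode_dirty_sync(inode);	/* schedule a timestamp writeback */
}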