1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * (C) 1997 Linus Torvalds 4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation) 5 */ 6 #include <linux/export.h> 7 #include <linux/fs.h> 8 #include <linux/mm.h> 9 #include <linux/backing-dev.h> 10 #include <linux/hash.h> 11 #include <linux/swap.h> 12 #include <linux/security.h> 13 #include <linux/cdev.h> 14 #include <linux/memblock.h> 15 #include <linux/fscrypt.h> 16 #include <linux/fsnotify.h> 17 #include <linux/mount.h> 18 #include <linux/posix_acl.h> 19 #include <linux/prefetch.h> 20 #include <linux/buffer_head.h> /* for inode_has_buffers */ 21 #include <linux/ratelimit.h> 22 #include <linux/list_lru.h> 23 #include <linux/iversion.h> 24 #include <trace/events/writeback.h> 25 #include "internal.h" 26 27 /* 28 * Inode locking rules: 29 * 30 * inode->i_lock protects: 31 * inode->i_state, inode->i_hash, __iget() 32 * Inode LRU list locks protect: 33 * inode->i_sb->s_inode_lru, inode->i_lru 34 * inode->i_sb->s_inode_list_lock protects: 35 * inode->i_sb->s_inodes, inode->i_sb_list 36 * bdi->wb.list_lock protects: 37 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list 38 * inode_hash_lock protects: 39 * inode_hashtable, inode->i_hash 40 * 41 * Lock ordering: 42 * 43 * inode->i_sb->s_inode_list_lock 44 * inode->i_lock 45 * Inode LRU list locks 46 * 47 * bdi->wb.list_lock 48 * inode->i_lock 49 * 50 * inode_hash_lock 51 * inode->i_sb->s_inode_list_lock 52 * inode->i_lock 53 * 54 * iunique_lock 55 * inode_hash_lock 56 */ 57 58 static unsigned int i_hash_mask __read_mostly; 59 static unsigned int i_hash_shift __read_mostly; 60 static struct hlist_head *inode_hashtable __read_mostly; 61 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); 62 63 /* 64 * Empty aops. Can be used for the cases where the user does not 65 * define any of the address_space operations. 66 */ 67 const struct address_space_operations empty_aops = { 68 }; 69 EXPORT_SYMBOL(empty_aops); 70 71 /* 72 * Statistics gathering.. 73 */ 74 struct inodes_stat_t inodes_stat; 75 76 static DEFINE_PER_CPU(unsigned long, nr_inodes); 77 static DEFINE_PER_CPU(unsigned long, nr_unused); 78 79 static struct kmem_cache *inode_cachep __read_mostly; 80 81 static long get_nr_inodes(void) 82 { 83 int i; 84 long sum = 0; 85 for_each_possible_cpu(i) 86 sum += per_cpu(nr_inodes, i); 87 return sum < 0 ? 0 : sum; 88 } 89 90 static inline long get_nr_inodes_unused(void) 91 { 92 int i; 93 long sum = 0; 94 for_each_possible_cpu(i) 95 sum += per_cpu(nr_unused, i); 96 return sum < 0 ? 0 : sum; 97 } 98 99 long get_nr_dirty_inodes(void) 100 { 101 /* not actually dirty inodes, but a wild approximation */ 102 long nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); 103 return nr_dirty > 0 ? nr_dirty : 0; 104 } 105 106 /* 107 * Handle nr_inode sysctl 108 */ 109 #ifdef CONFIG_SYSCTL 110 int proc_nr_inodes(struct ctl_table *table, int write, 111 void __user *buffer, size_t *lenp, loff_t *ppos) 112 { 113 inodes_stat.nr_inodes = get_nr_inodes(); 114 inodes_stat.nr_unused = get_nr_inodes_unused(); 115 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 116 } 117 #endif 118 119 static int no_open(struct inode *inode, struct file *file) 120 { 121 return -ENXIO; 122 } 123 124 /** 125 * inode_init_always - perform inode structure initialisation 126 * @sb: superblock inode belongs to 127 * @inode: inode to initialise 128 * 129 * These are initializations that need to be done on every inode 130 * allocation as the fields are not initialised by slab allocation. 131 */ 132 int inode_init_always(struct super_block *sb, struct inode *inode) 133 { 134 static const struct inode_operations empty_iops; 135 static const struct file_operations no_open_fops = {.open = no_open}; 136 struct address_space *const mapping = &inode->i_data; 137 138 inode->i_sb = sb; 139 inode->i_blkbits = sb->s_blocksize_bits; 140 inode->i_flags = 0; 141 atomic_set(&inode->i_count, 1); 142 inode->i_op = &empty_iops; 143 inode->i_fop = &no_open_fops; 144 inode->__i_nlink = 1; 145 inode->i_opflags = 0; 146 if (sb->s_xattr) 147 inode->i_opflags |= IOP_XATTR; 148 i_uid_write(inode, 0); 149 i_gid_write(inode, 0); 150 atomic_set(&inode->i_writecount, 0); 151 inode->i_size = 0; 152 inode->i_write_hint = WRITE_LIFE_NOT_SET; 153 inode->i_blocks = 0; 154 inode->i_bytes = 0; 155 inode->i_generation = 0; 156 inode->i_pipe = NULL; 157 inode->i_bdev = NULL; 158 inode->i_cdev = NULL; 159 inode->i_link = NULL; 160 inode->i_dir_seq = 0; 161 inode->i_rdev = 0; 162 inode->dirtied_when = 0; 163 164 #ifdef CONFIG_CGROUP_WRITEBACK 165 inode->i_wb_frn_winner = 0; 166 inode->i_wb_frn_avg_time = 0; 167 inode->i_wb_frn_history = 0; 168 #endif 169 170 if (security_inode_alloc(inode)) 171 goto out; 172 spin_lock_init(&inode->i_lock); 173 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); 174 175 init_rwsem(&inode->i_rwsem); 176 lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); 177 178 atomic_set(&inode->i_dio_count, 0); 179 180 mapping->a_ops = &empty_aops; 181 mapping->host = inode; 182 mapping->flags = 0; 183 mapping->wb_err = 0; 184 atomic_set(&mapping->i_mmap_writable, 0); 185 #ifdef CONFIG_READ_ONLY_THP_FOR_FS 186 atomic_set(&mapping->nr_thps, 0); 187 #endif 188 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 189 mapping->private_data = NULL; 190 mapping->writeback_index = 0; 191 inode->i_private = NULL; 192 inode->i_mapping = mapping; 193 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ 194 #ifdef CONFIG_FS_POSIX_ACL 195 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; 196 #endif 197 198 #ifdef CONFIG_FSNOTIFY 199 inode->i_fsnotify_mask = 0; 200 #endif 201 inode->i_flctx = NULL; 202 this_cpu_inc(nr_inodes); 203 204 return 0; 205 out: 206 return -ENOMEM; 207 } 208 EXPORT_SYMBOL(inode_init_always); 209 210 void free_inode_nonrcu(struct inode *inode) 211 { 212 kmem_cache_free(inode_cachep, inode); 213 } 214 EXPORT_SYMBOL(free_inode_nonrcu); 215 216 static void i_callback(struct rcu_head *head) 217 { 218 struct inode *inode = container_of(head, struct inode, i_rcu); 219 if (inode->free_inode) 220 inode->free_inode(inode); 221 else 222 free_inode_nonrcu(inode); 223 } 224 225 static struct inode *alloc_inode(struct super_block *sb) 226 { 227 const struct super_operations *ops = sb->s_op; 228 struct inode *inode; 229 230 if (ops->alloc_inode) 231 inode = ops->alloc_inode(sb); 232 else 233 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); 234 235 if (!inode) 236 return NULL; 237 238 if (unlikely(inode_init_always(sb, inode))) { 239 if (ops->destroy_inode) { 240 ops->destroy_inode(inode); 241 if (!ops->free_inode) 242 return NULL; 243 } 244 inode->free_inode = ops->free_inode; 245 i_callback(&inode->i_rcu); 246 return NULL; 247 } 248 249 return inode; 250 } 251 252 void __destroy_inode(struct inode *inode) 253 { 254 BUG_ON(inode_has_buffers(inode)); 255 inode_detach_wb(inode); 256 security_inode_free(inode); 257 fsnotify_inode_delete(inode); 258 locks_free_lock_context(inode); 259 if (!inode->i_nlink) { 260 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); 261 atomic_long_dec(&inode->i_sb->s_remove_count); 262 } 263 264 #ifdef CONFIG_FS_POSIX_ACL 265 if (inode->i_acl && !is_uncached_acl(inode->i_acl)) 266 posix_acl_release(inode->i_acl); 267 if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl)) 268 posix_acl_release(inode->i_default_acl); 269 #endif 270 this_cpu_dec(nr_inodes); 271 } 272 EXPORT_SYMBOL(__destroy_inode); 273 274 static void destroy_inode(struct inode *inode) 275 { 276 const struct super_operations *ops = inode->i_sb->s_op; 277 278 BUG_ON(!list_empty(&inode->i_lru)); 279 __destroy_inode(inode); 280 if (ops->destroy_inode) { 281 ops->destroy_inode(inode); 282 if (!ops->free_inode) 283 return; 284 } 285 inode->free_inode = ops->free_inode; 286 call_rcu(&inode->i_rcu, i_callback); 287 } 288 289 /** 290 * drop_nlink - directly drop an inode's link count 291 * @inode: inode 292 * 293 * This is a low-level filesystem helper to replace any 294 * direct filesystem manipulation of i_nlink. In cases 295 * where we are attempting to track writes to the 296 * filesystem, a decrement to zero means an imminent 297 * write when the file is truncated and actually unlinked 298 * on the filesystem. 299 */ 300 void drop_nlink(struct inode *inode) 301 { 302 WARN_ON(inode->i_nlink == 0); 303 inode->__i_nlink--; 304 if (!inode->i_nlink) 305 atomic_long_inc(&inode->i_sb->s_remove_count); 306 } 307 EXPORT_SYMBOL(drop_nlink); 308 309 /** 310 * clear_nlink - directly zero an inode's link count 311 * @inode: inode 312 * 313 * This is a low-level filesystem helper to replace any 314 * direct filesystem manipulation of i_nlink. See 315 * drop_nlink() for why we care about i_nlink hitting zero. 316 */ 317 void clear_nlink(struct inode *inode) 318 { 319 if (inode->i_nlink) { 320 inode->__i_nlink = 0; 321 atomic_long_inc(&inode->i_sb->s_remove_count); 322 } 323 } 324 EXPORT_SYMBOL(clear_nlink); 325 326 /** 327 * set_nlink - directly set an inode's link count 328 * @inode: inode 329 * @nlink: new nlink (should be non-zero) 330 * 331 * This is a low-level filesystem helper to replace any 332 * direct filesystem manipulation of i_nlink. 333 */ 334 void set_nlink(struct inode *inode, unsigned int nlink) 335 { 336 if (!nlink) { 337 clear_nlink(inode); 338 } else { 339 /* Yes, some filesystems do change nlink from zero to one */ 340 if (inode->i_nlink == 0) 341 atomic_long_dec(&inode->i_sb->s_remove_count); 342 343 inode->__i_nlink = nlink; 344 } 345 } 346 EXPORT_SYMBOL(set_nlink); 347 348 /** 349 * inc_nlink - directly increment an inode's link count 350 * @inode: inode 351 * 352 * This is a low-level filesystem helper to replace any 353 * direct filesystem manipulation of i_nlink. Currently, 354 * it is only here for parity with dec_nlink(). 355 */ 356 void inc_nlink(struct inode *inode) 357 { 358 if (unlikely(inode->i_nlink == 0)) { 359 WARN_ON(!(inode->i_state & I_LINKABLE)); 360 atomic_long_dec(&inode->i_sb->s_remove_count); 361 } 362 363 inode->__i_nlink++; 364 } 365 EXPORT_SYMBOL(inc_nlink); 366 367 static void __address_space_init_once(struct address_space *mapping) 368 { 369 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT); 370 init_rwsem(&mapping->i_mmap_rwsem); 371 INIT_LIST_HEAD(&mapping->private_list); 372 spin_lock_init(&mapping->private_lock); 373 mapping->i_mmap = RB_ROOT_CACHED; 374 } 375 376 void address_space_init_once(struct address_space *mapping) 377 { 378 memset(mapping, 0, sizeof(*mapping)); 379 __address_space_init_once(mapping); 380 } 381 EXPORT_SYMBOL(address_space_init_once); 382 383 /* 384 * These are initializations that only need to be done 385 * once, because the fields are idempotent across use 386 * of the inode, so let the slab aware of that. 387 */ 388 void inode_init_once(struct inode *inode) 389 { 390 memset(inode, 0, sizeof(*inode)); 391 INIT_HLIST_NODE(&inode->i_hash); 392 INIT_LIST_HEAD(&inode->i_devices); 393 INIT_LIST_HEAD(&inode->i_io_list); 394 INIT_LIST_HEAD(&inode->i_wb_list); 395 INIT_LIST_HEAD(&inode->i_lru); 396 __address_space_init_once(&inode->i_data); 397 i_size_ordered_init(inode); 398 } 399 EXPORT_SYMBOL(inode_init_once); 400 401 static void init_once(void *foo) 402 { 403 struct inode *inode = (struct inode *) foo; 404 405 inode_init_once(inode); 406 } 407 408 /* 409 * inode->i_lock must be held 410 */ 411 void __iget(struct inode *inode) 412 { 413 atomic_inc(&inode->i_count); 414 } 415 416 /* 417 * get additional reference to inode; caller must already hold one. 418 */ 419 void ihold(struct inode *inode) 420 { 421 WARN_ON(atomic_inc_return(&inode->i_count) < 2); 422 } 423 EXPORT_SYMBOL(ihold); 424 425 static void inode_lru_list_add(struct inode *inode) 426 { 427 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) 428 this_cpu_inc(nr_unused); 429 else 430 inode->i_state |= I_REFERENCED; 431 } 432 433 /* 434 * Add inode to LRU if needed (inode is unused and clean). 435 * 436 * Needs inode->i_lock held. 437 */ 438 void inode_add_lru(struct inode *inode) 439 { 440 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | 441 I_FREEING | I_WILL_FREE)) && 442 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE) 443 inode_lru_list_add(inode); 444 } 445 446 447 static void inode_lru_list_del(struct inode *inode) 448 { 449 450 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) 451 this_cpu_dec(nr_unused); 452 } 453 454 /** 455 * inode_sb_list_add - add inode to the superblock list of inodes 456 * @inode: inode to add 457 */ 458 void inode_sb_list_add(struct inode *inode) 459 { 460 spin_lock(&inode->i_sb->s_inode_list_lock); 461 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); 462 spin_unlock(&inode->i_sb->s_inode_list_lock); 463 } 464 EXPORT_SYMBOL_GPL(inode_sb_list_add); 465 466 static inline void inode_sb_list_del(struct inode *inode) 467 { 468 if (!list_empty(&inode->i_sb_list)) { 469 spin_lock(&inode->i_sb->s_inode_list_lock); 470 list_del_init(&inode->i_sb_list); 471 spin_unlock(&inode->i_sb->s_inode_list_lock); 472 } 473 } 474 475 static unsigned long hash(struct super_block *sb, unsigned long hashval) 476 { 477 unsigned long tmp; 478 479 tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / 480 L1_CACHE_BYTES; 481 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift); 482 return tmp & i_hash_mask; 483 } 484 485 /** 486 * __insert_inode_hash - hash an inode 487 * @inode: unhashed inode 488 * @hashval: unsigned long value used to locate this object in the 489 * inode_hashtable. 490 * 491 * Add an inode to the inode hash for this superblock. 492 */ 493 void __insert_inode_hash(struct inode *inode, unsigned long hashval) 494 { 495 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval); 496 497 spin_lock(&inode_hash_lock); 498 spin_lock(&inode->i_lock); 499 hlist_add_head(&inode->i_hash, b); 500 spin_unlock(&inode->i_lock); 501 spin_unlock(&inode_hash_lock); 502 } 503 EXPORT_SYMBOL(__insert_inode_hash); 504 505 /** 506 * __remove_inode_hash - remove an inode from the hash 507 * @inode: inode to unhash 508 * 509 * Remove an inode from the superblock. 510 */ 511 void __remove_inode_hash(struct inode *inode) 512 { 513 spin_lock(&inode_hash_lock); 514 spin_lock(&inode->i_lock); 515 hlist_del_init(&inode->i_hash); 516 spin_unlock(&inode->i_lock); 517 spin_unlock(&inode_hash_lock); 518 } 519 EXPORT_SYMBOL(__remove_inode_hash); 520 521 void clear_inode(struct inode *inode) 522 { 523 /* 524 * We have to cycle the i_pages lock here because reclaim can be in the 525 * process of removing the last page (in __delete_from_page_cache()) 526 * and we must not free the mapping under it. 527 */ 528 xa_lock_irq(&inode->i_data.i_pages); 529 BUG_ON(inode->i_data.nrpages); 530 BUG_ON(inode->i_data.nrexceptional); 531 xa_unlock_irq(&inode->i_data.i_pages); 532 BUG_ON(!list_empty(&inode->i_data.private_list)); 533 BUG_ON(!(inode->i_state & I_FREEING)); 534 BUG_ON(inode->i_state & I_CLEAR); 535 BUG_ON(!list_empty(&inode->i_wb_list)); 536 /* don't need i_lock here, no concurrent mods to i_state */ 537 inode->i_state = I_FREEING | I_CLEAR; 538 } 539 EXPORT_SYMBOL(clear_inode); 540 541 /* 542 * Free the inode passed in, removing it from the lists it is still connected 543 * to. We remove any pages still attached to the inode and wait for any IO that 544 * is still in progress before finally destroying the inode. 545 * 546 * An inode must already be marked I_FREEING so that we avoid the inode being 547 * moved back onto lists if we race with other code that manipulates the lists 548 * (e.g. writeback_single_inode). The caller is responsible for setting this. 549 * 550 * An inode must already be removed from the LRU list before being evicted from 551 * the cache. This should occur atomically with setting the I_FREEING state 552 * flag, so no inodes here should ever be on the LRU when being evicted. 553 */ 554 static void evict(struct inode *inode) 555 { 556 const struct super_operations *op = inode->i_sb->s_op; 557 558 BUG_ON(!(inode->i_state & I_FREEING)); 559 BUG_ON(!list_empty(&inode->i_lru)); 560 561 if (!list_empty(&inode->i_io_list)) 562 inode_io_list_del(inode); 563 564 inode_sb_list_del(inode); 565 566 /* 567 * Wait for flusher thread to be done with the inode so that filesystem 568 * does not start destroying it while writeback is still running. Since 569 * the inode has I_FREEING set, flusher thread won't start new work on 570 * the inode. We just have to wait for running writeback to finish. 571 */ 572 inode_wait_for_writeback(inode); 573 574 if (op->evict_inode) { 575 op->evict_inode(inode); 576 } else { 577 truncate_inode_pages_final(&inode->i_data); 578 clear_inode(inode); 579 } 580 if (S_ISBLK(inode->i_mode) && inode->i_bdev) 581 bd_forget(inode); 582 if (S_ISCHR(inode->i_mode) && inode->i_cdev) 583 cd_forget(inode); 584 585 remove_inode_hash(inode); 586 587 spin_lock(&inode->i_lock); 588 wake_up_bit(&inode->i_state, __I_NEW); 589 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); 590 spin_unlock(&inode->i_lock); 591 592 destroy_inode(inode); 593 } 594 595 /* 596 * dispose_list - dispose of the contents of a local list 597 * @head: the head of the list to free 598 * 599 * Dispose-list gets a local list with local inodes in it, so it doesn't 600 * need to worry about list corruption and SMP locks. 601 */ 602 static void dispose_list(struct list_head *head) 603 { 604 while (!list_empty(head)) { 605 struct inode *inode; 606 607 inode = list_first_entry(head, struct inode, i_lru); 608 list_del_init(&inode->i_lru); 609 610 evict(inode); 611 cond_resched(); 612 } 613 } 614 615 /** 616 * evict_inodes - evict all evictable inodes for a superblock 617 * @sb: superblock to operate on 618 * 619 * Make sure that no inodes with zero refcount are retained. This is 620 * called by superblock shutdown after having SB_ACTIVE flag removed, 621 * so any inode reaching zero refcount during or after that call will 622 * be immediately evicted. 623 */ 624 void evict_inodes(struct super_block *sb) 625 { 626 struct inode *inode, *next; 627 LIST_HEAD(dispose); 628 629 again: 630 spin_lock(&sb->s_inode_list_lock); 631 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 632 if (atomic_read(&inode->i_count)) 633 continue; 634 635 spin_lock(&inode->i_lock); 636 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 637 spin_unlock(&inode->i_lock); 638 continue; 639 } 640 641 inode->i_state |= I_FREEING; 642 inode_lru_list_del(inode); 643 spin_unlock(&inode->i_lock); 644 list_add(&inode->i_lru, &dispose); 645 646 /* 647 * We can have a ton of inodes to evict at unmount time given 648 * enough memory, check to see if we need to go to sleep for a 649 * bit so we don't livelock. 650 */ 651 if (need_resched()) { 652 spin_unlock(&sb->s_inode_list_lock); 653 cond_resched(); 654 dispose_list(&dispose); 655 goto again; 656 } 657 } 658 spin_unlock(&sb->s_inode_list_lock); 659 660 dispose_list(&dispose); 661 } 662 EXPORT_SYMBOL_GPL(evict_inodes); 663 664 /** 665 * invalidate_inodes - attempt to free all inodes on a superblock 666 * @sb: superblock to operate on 667 * @kill_dirty: flag to guide handling of dirty inodes 668 * 669 * Attempts to free all inodes for a given superblock. If there were any 670 * busy inodes return a non-zero value, else zero. 671 * If @kill_dirty is set, discard dirty inodes too, otherwise treat 672 * them as busy. 673 */ 674 int invalidate_inodes(struct super_block *sb, bool kill_dirty) 675 { 676 int busy = 0; 677 struct inode *inode, *next; 678 LIST_HEAD(dispose); 679 680 again: 681 spin_lock(&sb->s_inode_list_lock); 682 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 683 spin_lock(&inode->i_lock); 684 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 685 spin_unlock(&inode->i_lock); 686 continue; 687 } 688 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) { 689 spin_unlock(&inode->i_lock); 690 busy = 1; 691 continue; 692 } 693 if (atomic_read(&inode->i_count)) { 694 spin_unlock(&inode->i_lock); 695 busy = 1; 696 continue; 697 } 698 699 inode->i_state |= I_FREEING; 700 inode_lru_list_del(inode); 701 spin_unlock(&inode->i_lock); 702 list_add(&inode->i_lru, &dispose); 703 if (need_resched()) { 704 spin_unlock(&sb->s_inode_list_lock); 705 cond_resched(); 706 dispose_list(&dispose); 707 goto again; 708 } 709 } 710 spin_unlock(&sb->s_inode_list_lock); 711 712 dispose_list(&dispose); 713 714 return busy; 715 } 716 717 /* 718 * Isolate the inode from the LRU in preparation for freeing it. 719 * 720 * Any inodes which are pinned purely because of attached pagecache have their 721 * pagecache removed. If the inode has metadata buffers attached to 722 * mapping->private_list then try to remove them. 723 * 724 * If the inode has the I_REFERENCED flag set, then it means that it has been 725 * used recently - the flag is set in iput_final(). When we encounter such an 726 * inode, clear the flag and move it to the back of the LRU so it gets another 727 * pass through the LRU before it gets reclaimed. This is necessary because of 728 * the fact we are doing lazy LRU updates to minimise lock contention so the 729 * LRU does not have strict ordering. Hence we don't want to reclaim inodes 730 * with this flag set because they are the inodes that are out of order. 731 */ 732 static enum lru_status inode_lru_isolate(struct list_head *item, 733 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 734 { 735 struct list_head *freeable = arg; 736 struct inode *inode = container_of(item, struct inode, i_lru); 737 738 /* 739 * we are inverting the lru lock/inode->i_lock here, so use a trylock. 740 * If we fail to get the lock, just skip it. 741 */ 742 if (!spin_trylock(&inode->i_lock)) 743 return LRU_SKIP; 744 745 /* 746 * Referenced or dirty inodes are still in use. Give them another pass 747 * through the LRU as we canot reclaim them now. 748 */ 749 if (atomic_read(&inode->i_count) || 750 (inode->i_state & ~I_REFERENCED)) { 751 list_lru_isolate(lru, &inode->i_lru); 752 spin_unlock(&inode->i_lock); 753 this_cpu_dec(nr_unused); 754 return LRU_REMOVED; 755 } 756 757 /* recently referenced inodes get one more pass */ 758 if (inode->i_state & I_REFERENCED) { 759 inode->i_state &= ~I_REFERENCED; 760 spin_unlock(&inode->i_lock); 761 return LRU_ROTATE; 762 } 763 764 if (inode_has_buffers(inode) || inode->i_data.nrpages) { 765 __iget(inode); 766 spin_unlock(&inode->i_lock); 767 spin_unlock(lru_lock); 768 if (remove_inode_buffers(inode)) { 769 unsigned long reap; 770 reap = invalidate_mapping_pages(&inode->i_data, 0, -1); 771 if (current_is_kswapd()) 772 __count_vm_events(KSWAPD_INODESTEAL, reap); 773 else 774 __count_vm_events(PGINODESTEAL, reap); 775 if (current->reclaim_state) 776 current->reclaim_state->reclaimed_slab += reap; 777 } 778 iput(inode); 779 spin_lock(lru_lock); 780 return LRU_RETRY; 781 } 782 783 WARN_ON(inode->i_state & I_NEW); 784 inode->i_state |= I_FREEING; 785 list_lru_isolate_move(lru, &inode->i_lru, freeable); 786 spin_unlock(&inode->i_lock); 787 788 this_cpu_dec(nr_unused); 789 return LRU_REMOVED; 790 } 791 792 /* 793 * Walk the superblock inode LRU for freeable inodes and attempt to free them. 794 * This is called from the superblock shrinker function with a number of inodes 795 * to trim from the LRU. Inodes to be freed are moved to a temporary list and 796 * then are freed outside inode_lock by dispose_list(). 797 */ 798 long prune_icache_sb(struct super_block *sb, struct shrink_control *sc) 799 { 800 LIST_HEAD(freeable); 801 long freed; 802 803 freed = list_lru_shrink_walk(&sb->s_inode_lru, sc, 804 inode_lru_isolate, &freeable); 805 dispose_list(&freeable); 806 return freed; 807 } 808 809 static void __wait_on_freeing_inode(struct inode *inode); 810 /* 811 * Called with the inode lock held. 812 */ 813 static struct inode *find_inode(struct super_block *sb, 814 struct hlist_head *head, 815 int (*test)(struct inode *, void *), 816 void *data) 817 { 818 struct inode *inode = NULL; 819 820 repeat: 821 hlist_for_each_entry(inode, head, i_hash) { 822 if (inode->i_sb != sb) 823 continue; 824 if (!test(inode, data)) 825 continue; 826 spin_lock(&inode->i_lock); 827 if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 828 __wait_on_freeing_inode(inode); 829 goto repeat; 830 } 831 if (unlikely(inode->i_state & I_CREATING)) { 832 spin_unlock(&inode->i_lock); 833 return ERR_PTR(-ESTALE); 834 } 835 __iget(inode); 836 spin_unlock(&inode->i_lock); 837 return inode; 838 } 839 return NULL; 840 } 841 842 /* 843 * find_inode_fast is the fast path version of find_inode, see the comment at 844 * iget_locked for details. 845 */ 846 static struct inode *find_inode_fast(struct super_block *sb, 847 struct hlist_head *head, unsigned long ino) 848 { 849 struct inode *inode = NULL; 850 851 repeat: 852 hlist_for_each_entry(inode, head, i_hash) { 853 if (inode->i_ino != ino) 854 continue; 855 if (inode->i_sb != sb) 856 continue; 857 spin_lock(&inode->i_lock); 858 if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 859 __wait_on_freeing_inode(inode); 860 goto repeat; 861 } 862 if (unlikely(inode->i_state & I_CREATING)) { 863 spin_unlock(&inode->i_lock); 864 return ERR_PTR(-ESTALE); 865 } 866 __iget(inode); 867 spin_unlock(&inode->i_lock); 868 return inode; 869 } 870 return NULL; 871 } 872 873 /* 874 * Each cpu owns a range of LAST_INO_BATCH numbers. 875 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations, 876 * to renew the exhausted range. 877 * 878 * This does not significantly increase overflow rate because every CPU can 879 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is 880 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the 881 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase 882 * overflow rate by 2x, which does not seem too significant. 883 * 884 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 885 * error if st_ino won't fit in target struct field. Use 32bit counter 886 * here to attempt to avoid that. 887 */ 888 #define LAST_INO_BATCH 1024 889 static DEFINE_PER_CPU(unsigned int, last_ino); 890 891 unsigned int get_next_ino(void) 892 { 893 unsigned int *p = &get_cpu_var(last_ino); 894 unsigned int res = *p; 895 896 #ifdef CONFIG_SMP 897 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { 898 static atomic_t shared_last_ino; 899 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino); 900 901 res = next - LAST_INO_BATCH; 902 } 903 #endif 904 905 res++; 906 /* get_next_ino should not provide a 0 inode number */ 907 if (unlikely(!res)) 908 res++; 909 *p = res; 910 put_cpu_var(last_ino); 911 return res; 912 } 913 EXPORT_SYMBOL(get_next_ino); 914 915 /** 916 * new_inode_pseudo - obtain an inode 917 * @sb: superblock 918 * 919 * Allocates a new inode for given superblock. 920 * Inode wont be chained in superblock s_inodes list 921 * This means : 922 * - fs can't be unmount 923 * - quotas, fsnotify, writeback can't work 924 */ 925 struct inode *new_inode_pseudo(struct super_block *sb) 926 { 927 struct inode *inode = alloc_inode(sb); 928 929 if (inode) { 930 spin_lock(&inode->i_lock); 931 inode->i_state = 0; 932 spin_unlock(&inode->i_lock); 933 INIT_LIST_HEAD(&inode->i_sb_list); 934 } 935 return inode; 936 } 937 938 /** 939 * new_inode - obtain an inode 940 * @sb: superblock 941 * 942 * Allocates a new inode for given superblock. The default gfp_mask 943 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE. 944 * If HIGHMEM pages are unsuitable or it is known that pages allocated 945 * for the page cache are not reclaimable or migratable, 946 * mapping_set_gfp_mask() must be called with suitable flags on the 947 * newly created inode's mapping 948 * 949 */ 950 struct inode *new_inode(struct super_block *sb) 951 { 952 struct inode *inode; 953 954 spin_lock_prefetch(&sb->s_inode_list_lock); 955 956 inode = new_inode_pseudo(sb); 957 if (inode) 958 inode_sb_list_add(inode); 959 return inode; 960 } 961 EXPORT_SYMBOL(new_inode); 962 963 #ifdef CONFIG_DEBUG_LOCK_ALLOC 964 void lockdep_annotate_inode_mutex_key(struct inode *inode) 965 { 966 if (S_ISDIR(inode->i_mode)) { 967 struct file_system_type *type = inode->i_sb->s_type; 968 969 /* Set new key only if filesystem hasn't already changed it */ 970 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) { 971 /* 972 * ensure nobody is actually holding i_mutex 973 */ 974 // mutex_destroy(&inode->i_mutex); 975 init_rwsem(&inode->i_rwsem); 976 lockdep_set_class(&inode->i_rwsem, 977 &type->i_mutex_dir_key); 978 } 979 } 980 } 981 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); 982 #endif 983 984 /** 985 * unlock_new_inode - clear the I_NEW state and wake up any waiters 986 * @inode: new inode to unlock 987 * 988 * Called when the inode is fully initialised to clear the new state of the 989 * inode and wake up anyone waiting for the inode to finish initialisation. 990 */ 991 void unlock_new_inode(struct inode *inode) 992 { 993 lockdep_annotate_inode_mutex_key(inode); 994 spin_lock(&inode->i_lock); 995 WARN_ON(!(inode->i_state & I_NEW)); 996 inode->i_state &= ~I_NEW & ~I_CREATING; 997 smp_mb(); 998 wake_up_bit(&inode->i_state, __I_NEW); 999 spin_unlock(&inode->i_lock); 1000 } 1001 EXPORT_SYMBOL(unlock_new_inode); 1002 1003 void discard_new_inode(struct inode *inode) 1004 { 1005 lockdep_annotate_inode_mutex_key(inode); 1006 spin_lock(&inode->i_lock); 1007 WARN_ON(!(inode->i_state & I_NEW)); 1008 inode->i_state &= ~I_NEW; 1009 smp_mb(); 1010 wake_up_bit(&inode->i_state, __I_NEW); 1011 spin_unlock(&inode->i_lock); 1012 iput(inode); 1013 } 1014 EXPORT_SYMBOL(discard_new_inode); 1015 1016 /** 1017 * lock_two_nondirectories - take two i_mutexes on non-directory objects 1018 * 1019 * Lock any non-NULL argument that is not a directory. 1020 * Zero, one or two objects may be locked by this function. 1021 * 1022 * @inode1: first inode to lock 1023 * @inode2: second inode to lock 1024 */ 1025 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2) 1026 { 1027 if (inode1 > inode2) 1028 swap(inode1, inode2); 1029 1030 if (inode1 && !S_ISDIR(inode1->i_mode)) 1031 inode_lock(inode1); 1032 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) 1033 inode_lock_nested(inode2, I_MUTEX_NONDIR2); 1034 } 1035 EXPORT_SYMBOL(lock_two_nondirectories); 1036 1037 /** 1038 * unlock_two_nondirectories - release locks from lock_two_nondirectories() 1039 * @inode1: first inode to unlock 1040 * @inode2: second inode to unlock 1041 */ 1042 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2) 1043 { 1044 if (inode1 && !S_ISDIR(inode1->i_mode)) 1045 inode_unlock(inode1); 1046 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) 1047 inode_unlock(inode2); 1048 } 1049 EXPORT_SYMBOL(unlock_two_nondirectories); 1050 1051 /** 1052 * inode_insert5 - obtain an inode from a mounted file system 1053 * @inode: pre-allocated inode to use for insert to cache 1054 * @hashval: hash value (usually inode number) to get 1055 * @test: callback used for comparisons between inodes 1056 * @set: callback used to initialize a new struct inode 1057 * @data: opaque data pointer to pass to @test and @set 1058 * 1059 * Search for the inode specified by @hashval and @data in the inode cache, 1060 * and if present it is return it with an increased reference count. This is 1061 * a variant of iget5_locked() for callers that don't want to fail on memory 1062 * allocation of inode. 1063 * 1064 * If the inode is not in cache, insert the pre-allocated inode to cache and 1065 * return it locked, hashed, and with the I_NEW flag set. The file system gets 1066 * to fill it in before unlocking it via unlock_new_inode(). 1067 * 1068 * Note both @test and @set are called with the inode_hash_lock held, so can't 1069 * sleep. 1070 */ 1071 struct inode *inode_insert5(struct inode *inode, unsigned long hashval, 1072 int (*test)(struct inode *, void *), 1073 int (*set)(struct inode *, void *), void *data) 1074 { 1075 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval); 1076 struct inode *old; 1077 bool creating = inode->i_state & I_CREATING; 1078 1079 again: 1080 spin_lock(&inode_hash_lock); 1081 old = find_inode(inode->i_sb, head, test, data); 1082 if (unlikely(old)) { 1083 /* 1084 * Uhhuh, somebody else created the same inode under us. 1085 * Use the old inode instead of the preallocated one. 1086 */ 1087 spin_unlock(&inode_hash_lock); 1088 if (IS_ERR(old)) 1089 return NULL; 1090 wait_on_inode(old); 1091 if (unlikely(inode_unhashed(old))) { 1092 iput(old); 1093 goto again; 1094 } 1095 return old; 1096 } 1097 1098 if (set && unlikely(set(inode, data))) { 1099 inode = NULL; 1100 goto unlock; 1101 } 1102 1103 /* 1104 * Return the locked inode with I_NEW set, the 1105 * caller is responsible for filling in the contents 1106 */ 1107 spin_lock(&inode->i_lock); 1108 inode->i_state |= I_NEW; 1109 hlist_add_head(&inode->i_hash, head); 1110 spin_unlock(&inode->i_lock); 1111 if (!creating) 1112 inode_sb_list_add(inode); 1113 unlock: 1114 spin_unlock(&inode_hash_lock); 1115 1116 return inode; 1117 } 1118 EXPORT_SYMBOL(inode_insert5); 1119 1120 /** 1121 * iget5_locked - obtain an inode from a mounted file system 1122 * @sb: super block of file system 1123 * @hashval: hash value (usually inode number) to get 1124 * @test: callback used for comparisons between inodes 1125 * @set: callback used to initialize a new struct inode 1126 * @data: opaque data pointer to pass to @test and @set 1127 * 1128 * Search for the inode specified by @hashval and @data in the inode cache, 1129 * and if present it is return it with an increased reference count. This is 1130 * a generalized version of iget_locked() for file systems where the inode 1131 * number is not sufficient for unique identification of an inode. 1132 * 1133 * If the inode is not in cache, allocate a new inode and return it locked, 1134 * hashed, and with the I_NEW flag set. The file system gets to fill it in 1135 * before unlocking it via unlock_new_inode(). 1136 * 1137 * Note both @test and @set are called with the inode_hash_lock held, so can't 1138 * sleep. 1139 */ 1140 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, 1141 int (*test)(struct inode *, void *), 1142 int (*set)(struct inode *, void *), void *data) 1143 { 1144 struct inode *inode = ilookup5(sb, hashval, test, data); 1145 1146 if (!inode) { 1147 struct inode *new = alloc_inode(sb); 1148 1149 if (new) { 1150 new->i_state = 0; 1151 inode = inode_insert5(new, hashval, test, set, data); 1152 if (unlikely(inode != new)) 1153 destroy_inode(new); 1154 } 1155 } 1156 return inode; 1157 } 1158 EXPORT_SYMBOL(iget5_locked); 1159 1160 /** 1161 * iget_locked - obtain an inode from a mounted file system 1162 * @sb: super block of file system 1163 * @ino: inode number to get 1164 * 1165 * Search for the inode specified by @ino in the inode cache and if present 1166 * return it with an increased reference count. This is for file systems 1167 * where the inode number is sufficient for unique identification of an inode. 1168 * 1169 * If the inode is not in cache, allocate a new inode and return it locked, 1170 * hashed, and with the I_NEW flag set. The file system gets to fill it in 1171 * before unlocking it via unlock_new_inode(). 1172 */ 1173 struct inode *iget_locked(struct super_block *sb, unsigned long ino) 1174 { 1175 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1176 struct inode *inode; 1177 again: 1178 spin_lock(&inode_hash_lock); 1179 inode = find_inode_fast(sb, head, ino); 1180 spin_unlock(&inode_hash_lock); 1181 if (inode) { 1182 if (IS_ERR(inode)) 1183 return NULL; 1184 wait_on_inode(inode); 1185 if (unlikely(inode_unhashed(inode))) { 1186 iput(inode); 1187 goto again; 1188 } 1189 return inode; 1190 } 1191 1192 inode = alloc_inode(sb); 1193 if (inode) { 1194 struct inode *old; 1195 1196 spin_lock(&inode_hash_lock); 1197 /* We released the lock, so.. */ 1198 old = find_inode_fast(sb, head, ino); 1199 if (!old) { 1200 inode->i_ino = ino; 1201 spin_lock(&inode->i_lock); 1202 inode->i_state = I_NEW; 1203 hlist_add_head(&inode->i_hash, head); 1204 spin_unlock(&inode->i_lock); 1205 inode_sb_list_add(inode); 1206 spin_unlock(&inode_hash_lock); 1207 1208 /* Return the locked inode with I_NEW set, the 1209 * caller is responsible for filling in the contents 1210 */ 1211 return inode; 1212 } 1213 1214 /* 1215 * Uhhuh, somebody else created the same inode under 1216 * us. Use the old inode instead of the one we just 1217 * allocated. 1218 */ 1219 spin_unlock(&inode_hash_lock); 1220 destroy_inode(inode); 1221 if (IS_ERR(old)) 1222 return NULL; 1223 inode = old; 1224 wait_on_inode(inode); 1225 if (unlikely(inode_unhashed(inode))) { 1226 iput(inode); 1227 goto again; 1228 } 1229 } 1230 return inode; 1231 } 1232 EXPORT_SYMBOL(iget_locked); 1233 1234 /* 1235 * search the inode cache for a matching inode number. 1236 * If we find one, then the inode number we are trying to 1237 * allocate is not unique and so we should not use it. 1238 * 1239 * Returns 1 if the inode number is unique, 0 if it is not. 1240 */ 1241 static int test_inode_iunique(struct super_block *sb, unsigned long ino) 1242 { 1243 struct hlist_head *b = inode_hashtable + hash(sb, ino); 1244 struct inode *inode; 1245 1246 spin_lock(&inode_hash_lock); 1247 hlist_for_each_entry(inode, b, i_hash) { 1248 if (inode->i_ino == ino && inode->i_sb == sb) { 1249 spin_unlock(&inode_hash_lock); 1250 return 0; 1251 } 1252 } 1253 spin_unlock(&inode_hash_lock); 1254 1255 return 1; 1256 } 1257 1258 /** 1259 * iunique - get a unique inode number 1260 * @sb: superblock 1261 * @max_reserved: highest reserved inode number 1262 * 1263 * Obtain an inode number that is unique on the system for a given 1264 * superblock. This is used by file systems that have no natural 1265 * permanent inode numbering system. An inode number is returned that 1266 * is higher than the reserved limit but unique. 1267 * 1268 * BUGS: 1269 * With a large number of inodes live on the file system this function 1270 * currently becomes quite slow. 1271 */ 1272 ino_t iunique(struct super_block *sb, ino_t max_reserved) 1273 { 1274 /* 1275 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 1276 * error if st_ino won't fit in target struct field. Use 32bit counter 1277 * here to attempt to avoid that. 1278 */ 1279 static DEFINE_SPINLOCK(iunique_lock); 1280 static unsigned int counter; 1281 ino_t res; 1282 1283 spin_lock(&iunique_lock); 1284 do { 1285 if (counter <= max_reserved) 1286 counter = max_reserved + 1; 1287 res = counter++; 1288 } while (!test_inode_iunique(sb, res)); 1289 spin_unlock(&iunique_lock); 1290 1291 return res; 1292 } 1293 EXPORT_SYMBOL(iunique); 1294 1295 struct inode *igrab(struct inode *inode) 1296 { 1297 spin_lock(&inode->i_lock); 1298 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) { 1299 __iget(inode); 1300 spin_unlock(&inode->i_lock); 1301 } else { 1302 spin_unlock(&inode->i_lock); 1303 /* 1304 * Handle the case where s_op->clear_inode is not been 1305 * called yet, and somebody is calling igrab 1306 * while the inode is getting freed. 1307 */ 1308 inode = NULL; 1309 } 1310 return inode; 1311 } 1312 EXPORT_SYMBOL(igrab); 1313 1314 /** 1315 * ilookup5_nowait - search for an inode in the inode cache 1316 * @sb: super block of file system to search 1317 * @hashval: hash value (usually inode number) to search for 1318 * @test: callback used for comparisons between inodes 1319 * @data: opaque data pointer to pass to @test 1320 * 1321 * Search for the inode specified by @hashval and @data in the inode cache. 1322 * If the inode is in the cache, the inode is returned with an incremented 1323 * reference count. 1324 * 1325 * Note: I_NEW is not waited upon so you have to be very careful what you do 1326 * with the returned inode. You probably should be using ilookup5() instead. 1327 * 1328 * Note2: @test is called with the inode_hash_lock held, so can't sleep. 1329 */ 1330 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, 1331 int (*test)(struct inode *, void *), void *data) 1332 { 1333 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1334 struct inode *inode; 1335 1336 spin_lock(&inode_hash_lock); 1337 inode = find_inode(sb, head, test, data); 1338 spin_unlock(&inode_hash_lock); 1339 1340 return IS_ERR(inode) ? NULL : inode; 1341 } 1342 EXPORT_SYMBOL(ilookup5_nowait); 1343 1344 /** 1345 * ilookup5 - search for an inode in the inode cache 1346 * @sb: super block of file system to search 1347 * @hashval: hash value (usually inode number) to search for 1348 * @test: callback used for comparisons between inodes 1349 * @data: opaque data pointer to pass to @test 1350 * 1351 * Search for the inode specified by @hashval and @data in the inode cache, 1352 * and if the inode is in the cache, return the inode with an incremented 1353 * reference count. Waits on I_NEW before returning the inode. 1354 * returned with an incremented reference count. 1355 * 1356 * This is a generalized version of ilookup() for file systems where the 1357 * inode number is not sufficient for unique identification of an inode. 1358 * 1359 * Note: @test is called with the inode_hash_lock held, so can't sleep. 1360 */ 1361 struct inode *ilookup5(struct super_block *sb, unsigned long hashval, 1362 int (*test)(struct inode *, void *), void *data) 1363 { 1364 struct inode *inode; 1365 again: 1366 inode = ilookup5_nowait(sb, hashval, test, data); 1367 if (inode) { 1368 wait_on_inode(inode); 1369 if (unlikely(inode_unhashed(inode))) { 1370 iput(inode); 1371 goto again; 1372 } 1373 } 1374 return inode; 1375 } 1376 EXPORT_SYMBOL(ilookup5); 1377 1378 /** 1379 * ilookup - search for an inode in the inode cache 1380 * @sb: super block of file system to search 1381 * @ino: inode number to search for 1382 * 1383 * Search for the inode @ino in the inode cache, and if the inode is in the 1384 * cache, the inode is returned with an incremented reference count. 1385 */ 1386 struct inode *ilookup(struct super_block *sb, unsigned long ino) 1387 { 1388 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1389 struct inode *inode; 1390 again: 1391 spin_lock(&inode_hash_lock); 1392 inode = find_inode_fast(sb, head, ino); 1393 spin_unlock(&inode_hash_lock); 1394 1395 if (inode) { 1396 if (IS_ERR(inode)) 1397 return NULL; 1398 wait_on_inode(inode); 1399 if (unlikely(inode_unhashed(inode))) { 1400 iput(inode); 1401 goto again; 1402 } 1403 } 1404 return inode; 1405 } 1406 EXPORT_SYMBOL(ilookup); 1407 1408 /** 1409 * find_inode_nowait - find an inode in the inode cache 1410 * @sb: super block of file system to search 1411 * @hashval: hash value (usually inode number) to search for 1412 * @match: callback used for comparisons between inodes 1413 * @data: opaque data pointer to pass to @match 1414 * 1415 * Search for the inode specified by @hashval and @data in the inode 1416 * cache, where the helper function @match will return 0 if the inode 1417 * does not match, 1 if the inode does match, and -1 if the search 1418 * should be stopped. The @match function must be responsible for 1419 * taking the i_lock spin_lock and checking i_state for an inode being 1420 * freed or being initialized, and incrementing the reference count 1421 * before returning 1. It also must not sleep, since it is called with 1422 * the inode_hash_lock spinlock held. 1423 * 1424 * This is a even more generalized version of ilookup5() when the 1425 * function must never block --- find_inode() can block in 1426 * __wait_on_freeing_inode() --- or when the caller can not increment 1427 * the reference count because the resulting iput() might cause an 1428 * inode eviction. The tradeoff is that the @match funtion must be 1429 * very carefully implemented. 1430 */ 1431 struct inode *find_inode_nowait(struct super_block *sb, 1432 unsigned long hashval, 1433 int (*match)(struct inode *, unsigned long, 1434 void *), 1435 void *data) 1436 { 1437 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1438 struct inode *inode, *ret_inode = NULL; 1439 int mval; 1440 1441 spin_lock(&inode_hash_lock); 1442 hlist_for_each_entry(inode, head, i_hash) { 1443 if (inode->i_sb != sb) 1444 continue; 1445 mval = match(inode, hashval, data); 1446 if (mval == 0) 1447 continue; 1448 if (mval == 1) 1449 ret_inode = inode; 1450 goto out; 1451 } 1452 out: 1453 spin_unlock(&inode_hash_lock); 1454 return ret_inode; 1455 } 1456 EXPORT_SYMBOL(find_inode_nowait); 1457 1458 int insert_inode_locked(struct inode *inode) 1459 { 1460 struct super_block *sb = inode->i_sb; 1461 ino_t ino = inode->i_ino; 1462 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1463 1464 while (1) { 1465 struct inode *old = NULL; 1466 spin_lock(&inode_hash_lock); 1467 hlist_for_each_entry(old, head, i_hash) { 1468 if (old->i_ino != ino) 1469 continue; 1470 if (old->i_sb != sb) 1471 continue; 1472 spin_lock(&old->i_lock); 1473 if (old->i_state & (I_FREEING|I_WILL_FREE)) { 1474 spin_unlock(&old->i_lock); 1475 continue; 1476 } 1477 break; 1478 } 1479 if (likely(!old)) { 1480 spin_lock(&inode->i_lock); 1481 inode->i_state |= I_NEW | I_CREATING; 1482 hlist_add_head(&inode->i_hash, head); 1483 spin_unlock(&inode->i_lock); 1484 spin_unlock(&inode_hash_lock); 1485 return 0; 1486 } 1487 if (unlikely(old->i_state & I_CREATING)) { 1488 spin_unlock(&old->i_lock); 1489 spin_unlock(&inode_hash_lock); 1490 return -EBUSY; 1491 } 1492 __iget(old); 1493 spin_unlock(&old->i_lock); 1494 spin_unlock(&inode_hash_lock); 1495 wait_on_inode(old); 1496 if (unlikely(!inode_unhashed(old))) { 1497 iput(old); 1498 return -EBUSY; 1499 } 1500 iput(old); 1501 } 1502 } 1503 EXPORT_SYMBOL(insert_inode_locked); 1504 1505 int insert_inode_locked4(struct inode *inode, unsigned long hashval, 1506 int (*test)(struct inode *, void *), void *data) 1507 { 1508 struct inode *old; 1509 1510 inode->i_state |= I_CREATING; 1511 old = inode_insert5(inode, hashval, test, NULL, data); 1512 1513 if (old != inode) { 1514 iput(old); 1515 return -EBUSY; 1516 } 1517 return 0; 1518 } 1519 EXPORT_SYMBOL(insert_inode_locked4); 1520 1521 1522 int generic_delete_inode(struct inode *inode) 1523 { 1524 return 1; 1525 } 1526 EXPORT_SYMBOL(generic_delete_inode); 1527 1528 /* 1529 * Called when we're dropping the last reference 1530 * to an inode. 1531 * 1532 * Call the FS "drop_inode()" function, defaulting to 1533 * the legacy UNIX filesystem behaviour. If it tells 1534 * us to evict inode, do so. Otherwise, retain inode 1535 * in cache if fs is alive, sync and evict if fs is 1536 * shutting down. 1537 */ 1538 static void iput_final(struct inode *inode) 1539 { 1540 struct super_block *sb = inode->i_sb; 1541 const struct super_operations *op = inode->i_sb->s_op; 1542 int drop; 1543 1544 WARN_ON(inode->i_state & I_NEW); 1545 1546 if (op->drop_inode) 1547 drop = op->drop_inode(inode); 1548 else 1549 drop = generic_drop_inode(inode); 1550 1551 if (!drop && (sb->s_flags & SB_ACTIVE)) { 1552 inode_add_lru(inode); 1553 spin_unlock(&inode->i_lock); 1554 return; 1555 } 1556 1557 if (!drop) { 1558 inode->i_state |= I_WILL_FREE; 1559 spin_unlock(&inode->i_lock); 1560 write_inode_now(inode, 1); 1561 spin_lock(&inode->i_lock); 1562 WARN_ON(inode->i_state & I_NEW); 1563 inode->i_state &= ~I_WILL_FREE; 1564 } 1565 1566 inode->i_state |= I_FREEING; 1567 if (!list_empty(&inode->i_lru)) 1568 inode_lru_list_del(inode); 1569 spin_unlock(&inode->i_lock); 1570 1571 evict(inode); 1572 } 1573 1574 /** 1575 * iput - put an inode 1576 * @inode: inode to put 1577 * 1578 * Puts an inode, dropping its usage count. If the inode use count hits 1579 * zero, the inode is then freed and may also be destroyed. 1580 * 1581 * Consequently, iput() can sleep. 1582 */ 1583 void iput(struct inode *inode) 1584 { 1585 if (!inode) 1586 return; 1587 BUG_ON(inode->i_state & I_CLEAR); 1588 retry: 1589 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) { 1590 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) { 1591 atomic_inc(&inode->i_count); 1592 spin_unlock(&inode->i_lock); 1593 trace_writeback_lazytime_iput(inode); 1594 mark_inode_dirty_sync(inode); 1595 goto retry; 1596 } 1597 iput_final(inode); 1598 } 1599 } 1600 EXPORT_SYMBOL(iput); 1601 1602 #ifdef CONFIG_BLOCK 1603 /** 1604 * bmap - find a block number in a file 1605 * @inode: inode owning the block number being requested 1606 * @block: pointer containing the block to find 1607 * 1608 * Replaces the value in *block with the block number on the device holding 1609 * corresponding to the requested block number in the file. 1610 * That is, asked for block 4 of inode 1 the function will replace the 1611 * 4 in *block, with disk block relative to the disk start that holds that 1612 * block of the file. 1613 * 1614 * Returns -EINVAL in case of error, 0 otherwise. If mapping falls into a 1615 * hole, returns 0 and *block is also set to 0. 1616 */ 1617 int bmap(struct inode *inode, sector_t *block) 1618 { 1619 if (!inode->i_mapping->a_ops->bmap) 1620 return -EINVAL; 1621 1622 *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block); 1623 return 0; 1624 } 1625 EXPORT_SYMBOL(bmap); 1626 #endif 1627 1628 /* 1629 * With relative atime, only update atime if the previous atime is 1630 * earlier than either the ctime or mtime or if at least a day has 1631 * passed since the last atime update. 1632 */ 1633 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, 1634 struct timespec64 now) 1635 { 1636 1637 if (!(mnt->mnt_flags & MNT_RELATIME)) 1638 return 1; 1639 /* 1640 * Is mtime younger than atime? If yes, update atime: 1641 */ 1642 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0) 1643 return 1; 1644 /* 1645 * Is ctime younger than atime? If yes, update atime: 1646 */ 1647 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0) 1648 return 1; 1649 1650 /* 1651 * Is the previous atime value older than a day? If yes, 1652 * update atime: 1653 */ 1654 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60) 1655 return 1; 1656 /* 1657 * Good, we can skip the atime update: 1658 */ 1659 return 0; 1660 } 1661 1662 int generic_update_time(struct inode *inode, struct timespec64 *time, int flags) 1663 { 1664 int iflags = I_DIRTY_TIME; 1665 bool dirty = false; 1666 1667 if (flags & S_ATIME) 1668 inode->i_atime = *time; 1669 if (flags & S_VERSION) 1670 dirty = inode_maybe_inc_iversion(inode, false); 1671 if (flags & S_CTIME) 1672 inode->i_ctime = *time; 1673 if (flags & S_MTIME) 1674 inode->i_mtime = *time; 1675 if ((flags & (S_ATIME | S_CTIME | S_MTIME)) && 1676 !(inode->i_sb->s_flags & SB_LAZYTIME)) 1677 dirty = true; 1678 1679 if (dirty) 1680 iflags |= I_DIRTY_SYNC; 1681 __mark_inode_dirty(inode, iflags); 1682 return 0; 1683 } 1684 EXPORT_SYMBOL(generic_update_time); 1685 1686 /* 1687 * This does the actual work of updating an inodes time or version. Must have 1688 * had called mnt_want_write() before calling this. 1689 */ 1690 static int update_time(struct inode *inode, struct timespec64 *time, int flags) 1691 { 1692 if (inode->i_op->update_time) 1693 return inode->i_op->update_time(inode, time, flags); 1694 return generic_update_time(inode, time, flags); 1695 } 1696 1697 /** 1698 * touch_atime - update the access time 1699 * @path: the &struct path to update 1700 * @inode: inode to update 1701 * 1702 * Update the accessed time on an inode and mark it for writeback. 1703 * This function automatically handles read only file systems and media, 1704 * as well as the "noatime" flag and inode specific "noatime" markers. 1705 */ 1706 bool atime_needs_update(const struct path *path, struct inode *inode) 1707 { 1708 struct vfsmount *mnt = path->mnt; 1709 struct timespec64 now; 1710 1711 if (inode->i_flags & S_NOATIME) 1712 return false; 1713 1714 /* Atime updates will likely cause i_uid and i_gid to be written 1715 * back improprely if their true value is unknown to the vfs. 1716 */ 1717 if (HAS_UNMAPPED_ID(inode)) 1718 return false; 1719 1720 if (IS_NOATIME(inode)) 1721 return false; 1722 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)) 1723 return false; 1724 1725 if (mnt->mnt_flags & MNT_NOATIME) 1726 return false; 1727 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)) 1728 return false; 1729 1730 now = current_time(inode); 1731 1732 if (!relatime_need_update(mnt, inode, now)) 1733 return false; 1734 1735 if (timespec64_equal(&inode->i_atime, &now)) 1736 return false; 1737 1738 return true; 1739 } 1740 1741 void touch_atime(const struct path *path) 1742 { 1743 struct vfsmount *mnt = path->mnt; 1744 struct inode *inode = d_inode(path->dentry); 1745 struct timespec64 now; 1746 1747 if (!atime_needs_update(path, inode)) 1748 return; 1749 1750 if (!sb_start_write_trylock(inode->i_sb)) 1751 return; 1752 1753 if (__mnt_want_write(mnt) != 0) 1754 goto skip_update; 1755 /* 1756 * File systems can error out when updating inodes if they need to 1757 * allocate new space to modify an inode (such is the case for 1758 * Btrfs), but since we touch atime while walking down the path we 1759 * really don't care if we failed to update the atime of the file, 1760 * so just ignore the return value. 1761 * We may also fail on filesystems that have the ability to make parts 1762 * of the fs read only, e.g. subvolumes in Btrfs. 1763 */ 1764 now = current_time(inode); 1765 update_time(inode, &now, S_ATIME); 1766 __mnt_drop_write(mnt); 1767 skip_update: 1768 sb_end_write(inode->i_sb); 1769 } 1770 EXPORT_SYMBOL(touch_atime); 1771 1772 /* 1773 * The logic we want is 1774 * 1775 * if suid or (sgid and xgrp) 1776 * remove privs 1777 */ 1778 int should_remove_suid(struct dentry *dentry) 1779 { 1780 umode_t mode = d_inode(dentry)->i_mode; 1781 int kill = 0; 1782 1783 /* suid always must be killed */ 1784 if (unlikely(mode & S_ISUID)) 1785 kill = ATTR_KILL_SUID; 1786 1787 /* 1788 * sgid without any exec bits is just a mandatory locking mark; leave 1789 * it alone. If some exec bits are set, it's a real sgid; kill it. 1790 */ 1791 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) 1792 kill |= ATTR_KILL_SGID; 1793 1794 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) 1795 return kill; 1796 1797 return 0; 1798 } 1799 EXPORT_SYMBOL(should_remove_suid); 1800 1801 /* 1802 * Return mask of changes for notify_change() that need to be done as a 1803 * response to write or truncate. Return 0 if nothing has to be changed. 1804 * Negative value on error (change should be denied). 1805 */ 1806 int dentry_needs_remove_privs(struct dentry *dentry) 1807 { 1808 struct inode *inode = d_inode(dentry); 1809 int mask = 0; 1810 int ret; 1811 1812 if (IS_NOSEC(inode)) 1813 return 0; 1814 1815 mask = should_remove_suid(dentry); 1816 ret = security_inode_need_killpriv(dentry); 1817 if (ret < 0) 1818 return ret; 1819 if (ret) 1820 mask |= ATTR_KILL_PRIV; 1821 return mask; 1822 } 1823 1824 static int __remove_privs(struct dentry *dentry, int kill) 1825 { 1826 struct iattr newattrs; 1827 1828 newattrs.ia_valid = ATTR_FORCE | kill; 1829 /* 1830 * Note we call this on write, so notify_change will not 1831 * encounter any conflicting delegations: 1832 */ 1833 return notify_change(dentry, &newattrs, NULL); 1834 } 1835 1836 /* 1837 * Remove special file priviledges (suid, capabilities) when file is written 1838 * to or truncated. 1839 */ 1840 int file_remove_privs(struct file *file) 1841 { 1842 struct dentry *dentry = file_dentry(file); 1843 struct inode *inode = file_inode(file); 1844 int kill; 1845 int error = 0; 1846 1847 /* 1848 * Fast path for nothing security related. 1849 * As well for non-regular files, e.g. blkdev inodes. 1850 * For example, blkdev_write_iter() might get here 1851 * trying to remove privs which it is not allowed to. 1852 */ 1853 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode)) 1854 return 0; 1855 1856 kill = dentry_needs_remove_privs(dentry); 1857 if (kill < 0) 1858 return kill; 1859 if (kill) 1860 error = __remove_privs(dentry, kill); 1861 if (!error) 1862 inode_has_no_xattr(inode); 1863 1864 return error; 1865 } 1866 EXPORT_SYMBOL(file_remove_privs); 1867 1868 /** 1869 * file_update_time - update mtime and ctime time 1870 * @file: file accessed 1871 * 1872 * Update the mtime and ctime members of an inode and mark the inode 1873 * for writeback. Note that this function is meant exclusively for 1874 * usage in the file write path of filesystems, and filesystems may 1875 * choose to explicitly ignore update via this function with the 1876 * S_NOCMTIME inode flag, e.g. for network filesystem where these 1877 * timestamps are handled by the server. This can return an error for 1878 * file systems who need to allocate space in order to update an inode. 1879 */ 1880 1881 int file_update_time(struct file *file) 1882 { 1883 struct inode *inode = file_inode(file); 1884 struct timespec64 now; 1885 int sync_it = 0; 1886 int ret; 1887 1888 /* First try to exhaust all avenues to not sync */ 1889 if (IS_NOCMTIME(inode)) 1890 return 0; 1891 1892 now = current_time(inode); 1893 if (!timespec64_equal(&inode->i_mtime, &now)) 1894 sync_it = S_MTIME; 1895 1896 if (!timespec64_equal(&inode->i_ctime, &now)) 1897 sync_it |= S_CTIME; 1898 1899 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode)) 1900 sync_it |= S_VERSION; 1901 1902 if (!sync_it) 1903 return 0; 1904 1905 /* Finally allowed to write? Takes lock. */ 1906 if (__mnt_want_write_file(file)) 1907 return 0; 1908 1909 ret = update_time(inode, &now, sync_it); 1910 __mnt_drop_write_file(file); 1911 1912 return ret; 1913 } 1914 EXPORT_SYMBOL(file_update_time); 1915 1916 /* Caller must hold the file's inode lock */ 1917 int file_modified(struct file *file) 1918 { 1919 int err; 1920 1921 /* 1922 * Clear the security bits if the process is not being run by root. 1923 * This keeps people from modifying setuid and setgid binaries. 1924 */ 1925 err = file_remove_privs(file); 1926 if (err) 1927 return err; 1928 1929 if (unlikely(file->f_mode & FMODE_NOCMTIME)) 1930 return 0; 1931 1932 return file_update_time(file); 1933 } 1934 EXPORT_SYMBOL(file_modified); 1935 1936 int inode_needs_sync(struct inode *inode) 1937 { 1938 if (IS_SYNC(inode)) 1939 return 1; 1940 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) 1941 return 1; 1942 return 0; 1943 } 1944 EXPORT_SYMBOL(inode_needs_sync); 1945 1946 /* 1947 * If we try to find an inode in the inode hash while it is being 1948 * deleted, we have to wait until the filesystem completes its 1949 * deletion before reporting that it isn't found. This function waits 1950 * until the deletion _might_ have completed. Callers are responsible 1951 * to recheck inode state. 1952 * 1953 * It doesn't matter if I_NEW is not set initially, a call to 1954 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list 1955 * will DTRT. 1956 */ 1957 static void __wait_on_freeing_inode(struct inode *inode) 1958 { 1959 wait_queue_head_t *wq; 1960 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); 1961 wq = bit_waitqueue(&inode->i_state, __I_NEW); 1962 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 1963 spin_unlock(&inode->i_lock); 1964 spin_unlock(&inode_hash_lock); 1965 schedule(); 1966 finish_wait(wq, &wait.wq_entry); 1967 spin_lock(&inode_hash_lock); 1968 } 1969 1970 static __initdata unsigned long ihash_entries; 1971 static int __init set_ihash_entries(char *str) 1972 { 1973 if (!str) 1974 return 0; 1975 ihash_entries = simple_strtoul(str, &str, 0); 1976 return 1; 1977 } 1978 __setup("ihash_entries=", set_ihash_entries); 1979 1980 /* 1981 * Initialize the waitqueues and inode hash table. 1982 */ 1983 void __init inode_init_early(void) 1984 { 1985 /* If hashes are distributed across NUMA nodes, defer 1986 * hash allocation until vmalloc space is available. 1987 */ 1988 if (hashdist) 1989 return; 1990 1991 inode_hashtable = 1992 alloc_large_system_hash("Inode-cache", 1993 sizeof(struct hlist_head), 1994 ihash_entries, 1995 14, 1996 HASH_EARLY | HASH_ZERO, 1997 &i_hash_shift, 1998 &i_hash_mask, 1999 0, 2000 0); 2001 } 2002 2003 void __init inode_init(void) 2004 { 2005 /* inode slab cache */ 2006 inode_cachep = kmem_cache_create("inode_cache", 2007 sizeof(struct inode), 2008 0, 2009 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 2010 SLAB_MEM_SPREAD|SLAB_ACCOUNT), 2011 init_once); 2012 2013 /* Hash may have been set up in inode_init_early */ 2014 if (!hashdist) 2015 return; 2016 2017 inode_hashtable = 2018 alloc_large_system_hash("Inode-cache", 2019 sizeof(struct hlist_head), 2020 ihash_entries, 2021 14, 2022 HASH_ZERO, 2023 &i_hash_shift, 2024 &i_hash_mask, 2025 0, 2026 0); 2027 } 2028 2029 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) 2030 { 2031 inode->i_mode = mode; 2032 if (S_ISCHR(mode)) { 2033 inode->i_fop = &def_chr_fops; 2034 inode->i_rdev = rdev; 2035 } else if (S_ISBLK(mode)) { 2036 inode->i_fop = &def_blk_fops; 2037 inode->i_rdev = rdev; 2038 } else if (S_ISFIFO(mode)) 2039 inode->i_fop = &pipefifo_fops; 2040 else if (S_ISSOCK(mode)) 2041 ; /* leave it no_open_fops */ 2042 else 2043 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for" 2044 " inode %s:%lu\n", mode, inode->i_sb->s_id, 2045 inode->i_ino); 2046 } 2047 EXPORT_SYMBOL(init_special_inode); 2048 2049 /** 2050 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards 2051 * @inode: New inode 2052 * @dir: Directory inode 2053 * @mode: mode of the new inode 2054 */ 2055 void inode_init_owner(struct inode *inode, const struct inode *dir, 2056 umode_t mode) 2057 { 2058 inode->i_uid = current_fsuid(); 2059 if (dir && dir->i_mode & S_ISGID) { 2060 inode->i_gid = dir->i_gid; 2061 2062 /* Directories are special, and always inherit S_ISGID */ 2063 if (S_ISDIR(mode)) 2064 mode |= S_ISGID; 2065 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && 2066 !in_group_p(inode->i_gid) && 2067 !capable_wrt_inode_uidgid(dir, CAP_FSETID)) 2068 mode &= ~S_ISGID; 2069 } else 2070 inode->i_gid = current_fsgid(); 2071 inode->i_mode = mode; 2072 } 2073 EXPORT_SYMBOL(inode_init_owner); 2074 2075 /** 2076 * inode_owner_or_capable - check current task permissions to inode 2077 * @inode: inode being checked 2078 * 2079 * Return true if current either has CAP_FOWNER in a namespace with the 2080 * inode owner uid mapped, or owns the file. 2081 */ 2082 bool inode_owner_or_capable(const struct inode *inode) 2083 { 2084 struct user_namespace *ns; 2085 2086 if (uid_eq(current_fsuid(), inode->i_uid)) 2087 return true; 2088 2089 ns = current_user_ns(); 2090 if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER)) 2091 return true; 2092 return false; 2093 } 2094 EXPORT_SYMBOL(inode_owner_or_capable); 2095 2096 /* 2097 * Direct i/o helper functions 2098 */ 2099 static void __inode_dio_wait(struct inode *inode) 2100 { 2101 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); 2102 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP); 2103 2104 do { 2105 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE); 2106 if (atomic_read(&inode->i_dio_count)) 2107 schedule(); 2108 } while (atomic_read(&inode->i_dio_count)); 2109 finish_wait(wq, &q.wq_entry); 2110 } 2111 2112 /** 2113 * inode_dio_wait - wait for outstanding DIO requests to finish 2114 * @inode: inode to wait for 2115 * 2116 * Waits for all pending direct I/O requests to finish so that we can 2117 * proceed with a truncate or equivalent operation. 2118 * 2119 * Must be called under a lock that serializes taking new references 2120 * to i_dio_count, usually by inode->i_mutex. 2121 */ 2122 void inode_dio_wait(struct inode *inode) 2123 { 2124 if (atomic_read(&inode->i_dio_count)) 2125 __inode_dio_wait(inode); 2126 } 2127 EXPORT_SYMBOL(inode_dio_wait); 2128 2129 /* 2130 * inode_set_flags - atomically set some inode flags 2131 * 2132 * Note: the caller should be holding i_mutex, or else be sure that 2133 * they have exclusive access to the inode structure (i.e., while the 2134 * inode is being instantiated). The reason for the cmpxchg() loop 2135 * --- which wouldn't be necessary if all code paths which modify 2136 * i_flags actually followed this rule, is that there is at least one 2137 * code path which doesn't today so we use cmpxchg() out of an abundance 2138 * of caution. 2139 * 2140 * In the long run, i_mutex is overkill, and we should probably look 2141 * at using the i_lock spinlock to protect i_flags, and then make sure 2142 * it is so documented in include/linux/fs.h and that all code follows 2143 * the locking convention!! 2144 */ 2145 void inode_set_flags(struct inode *inode, unsigned int flags, 2146 unsigned int mask) 2147 { 2148 WARN_ON_ONCE(flags & ~mask); 2149 set_mask_bits(&inode->i_flags, mask, flags); 2150 } 2151 EXPORT_SYMBOL(inode_set_flags); 2152 2153 void inode_nohighmem(struct inode *inode) 2154 { 2155 mapping_set_gfp_mask(inode->i_mapping, GFP_USER); 2156 } 2157 EXPORT_SYMBOL(inode_nohighmem); 2158 2159 /** 2160 * timestamp_truncate - Truncate timespec to a granularity 2161 * @t: Timespec 2162 * @inode: inode being updated 2163 * 2164 * Truncate a timespec to the granularity supported by the fs 2165 * containing the inode. Always rounds down. gran must 2166 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). 2167 */ 2168 struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode) 2169 { 2170 struct super_block *sb = inode->i_sb; 2171 unsigned int gran = sb->s_time_gran; 2172 2173 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max); 2174 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min)) 2175 t.tv_nsec = 0; 2176 2177 /* Avoid division in the common cases 1 ns and 1 s. */ 2178 if (gran == 1) 2179 ; /* nothing */ 2180 else if (gran == NSEC_PER_SEC) 2181 t.tv_nsec = 0; 2182 else if (gran > 1 && gran < NSEC_PER_SEC) 2183 t.tv_nsec -= t.tv_nsec % gran; 2184 else 2185 WARN(1, "invalid file time granularity: %u", gran); 2186 return t; 2187 } 2188 EXPORT_SYMBOL(timestamp_truncate); 2189 2190 /** 2191 * current_time - Return FS time 2192 * @inode: inode. 2193 * 2194 * Return the current time truncated to the time granularity supported by 2195 * the fs. 2196 * 2197 * Note that inode and inode->sb cannot be NULL. 2198 * Otherwise, the function warns and returns time without truncation. 2199 */ 2200 struct timespec64 current_time(struct inode *inode) 2201 { 2202 struct timespec64 now; 2203 2204 ktime_get_coarse_real_ts64(&now); 2205 2206 if (unlikely(!inode->i_sb)) { 2207 WARN(1, "current_time() called with uninitialized super_block in the inode"); 2208 return now; 2209 } 2210 2211 return timestamp_truncate(now, inode); 2212 } 2213 EXPORT_SYMBOL(current_time); 2214 2215 /* 2216 * Generic function to check FS_IOC_SETFLAGS values and reject any invalid 2217 * configurations. 2218 * 2219 * Note: the caller should be holding i_mutex, or else be sure that they have 2220 * exclusive access to the inode structure. 2221 */ 2222 int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, 2223 unsigned int flags) 2224 { 2225 /* 2226 * The IMMUTABLE and APPEND_ONLY flags can only be changed by 2227 * the relevant capability. 2228 * 2229 * This test looks nicer. Thanks to Pauline Middelink 2230 */ 2231 if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) && 2232 !capable(CAP_LINUX_IMMUTABLE)) 2233 return -EPERM; 2234 2235 return fscrypt_prepare_setflags(inode, oldflags, flags); 2236 } 2237 EXPORT_SYMBOL(vfs_ioc_setflags_prepare); 2238 2239 /* 2240 * Generic function to check FS_IOC_FSSETXATTR values and reject any invalid 2241 * configurations. 2242 * 2243 * Note: the caller should be holding i_mutex, or else be sure that they have 2244 * exclusive access to the inode structure. 2245 */ 2246 int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa, 2247 struct fsxattr *fa) 2248 { 2249 /* 2250 * Can't modify an immutable/append-only file unless we have 2251 * appropriate permission. 2252 */ 2253 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) & 2254 (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND) && 2255 !capable(CAP_LINUX_IMMUTABLE)) 2256 return -EPERM; 2257 2258 /* 2259 * Project Quota ID state is only allowed to change from within the init 2260 * namespace. Enforce that restriction only if we are trying to change 2261 * the quota ID state. Everything else is allowed in user namespaces. 2262 */ 2263 if (current_user_ns() != &init_user_ns) { 2264 if (old_fa->fsx_projid != fa->fsx_projid) 2265 return -EINVAL; 2266 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) & 2267 FS_XFLAG_PROJINHERIT) 2268 return -EINVAL; 2269 } 2270 2271 /* Check extent size hints. */ 2272 if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode)) 2273 return -EINVAL; 2274 2275 if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && 2276 !S_ISDIR(inode->i_mode)) 2277 return -EINVAL; 2278 2279 if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) && 2280 !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) 2281 return -EINVAL; 2282 2283 /* 2284 * It is only valid to set the DAX flag on regular files and 2285 * directories on filesystems. 2286 */ 2287 if ((fa->fsx_xflags & FS_XFLAG_DAX) && 2288 !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) 2289 return -EINVAL; 2290 2291 /* Extent size hints of zero turn off the flags. */ 2292 if (fa->fsx_extsize == 0) 2293 fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); 2294 if (fa->fsx_cowextsize == 0) 2295 fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; 2296 2297 return 0; 2298 } 2299 EXPORT_SYMBOL(vfs_ioc_fssetxattr_check); 2300