1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * (C) 1997 Linus Torvalds 4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation) 5 */ 6 #include <linux/export.h> 7 #include <linux/fs.h> 8 #include <linux/mm.h> 9 #include <linux/backing-dev.h> 10 #include <linux/hash.h> 11 #include <linux/swap.h> 12 #include <linux/security.h> 13 #include <linux/cdev.h> 14 #include <linux/memblock.h> 15 #include <linux/fsnotify.h> 16 #include <linux/mount.h> 17 #include <linux/posix_acl.h> 18 #include <linux/prefetch.h> 19 #include <linux/buffer_head.h> /* for inode_has_buffers */ 20 #include <linux/ratelimit.h> 21 #include <linux/list_lru.h> 22 #include <linux/iversion.h> 23 #include <trace/events/writeback.h> 24 #include "internal.h" 25 26 /* 27 * Inode locking rules: 28 * 29 * inode->i_lock protects: 30 * inode->i_state, inode->i_hash, __iget() 31 * Inode LRU list locks protect: 32 * inode->i_sb->s_inode_lru, inode->i_lru 33 * inode->i_sb->s_inode_list_lock protects: 34 * inode->i_sb->s_inodes, inode->i_sb_list 35 * bdi->wb.list_lock protects: 36 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list 37 * inode_hash_lock protects: 38 * inode_hashtable, inode->i_hash 39 * 40 * Lock ordering: 41 * 42 * inode->i_sb->s_inode_list_lock 43 * inode->i_lock 44 * Inode LRU list locks 45 * 46 * bdi->wb.list_lock 47 * inode->i_lock 48 * 49 * inode_hash_lock 50 * inode->i_sb->s_inode_list_lock 51 * inode->i_lock 52 * 53 * iunique_lock 54 * inode_hash_lock 55 */ 56 57 static unsigned int i_hash_mask __read_mostly; 58 static unsigned int i_hash_shift __read_mostly; 59 static struct hlist_head *inode_hashtable __read_mostly; 60 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); 61 62 /* 63 * Empty aops. Can be used for the cases where the user does not 64 * define any of the address_space operations. 65 */ 66 const struct address_space_operations empty_aops = { 67 }; 68 EXPORT_SYMBOL(empty_aops); 69 70 /* 71 * Statistics gathering.. 72 */ 73 struct inodes_stat_t inodes_stat; 74 75 static DEFINE_PER_CPU(unsigned long, nr_inodes); 76 static DEFINE_PER_CPU(unsigned long, nr_unused); 77 78 static struct kmem_cache *inode_cachep __read_mostly; 79 80 static long get_nr_inodes(void) 81 { 82 int i; 83 long sum = 0; 84 for_each_possible_cpu(i) 85 sum += per_cpu(nr_inodes, i); 86 return sum < 0 ? 0 : sum; 87 } 88 89 static inline long get_nr_inodes_unused(void) 90 { 91 int i; 92 long sum = 0; 93 for_each_possible_cpu(i) 94 sum += per_cpu(nr_unused, i); 95 return sum < 0 ? 0 : sum; 96 } 97 98 long get_nr_dirty_inodes(void) 99 { 100 /* not actually dirty inodes, but a wild approximation */ 101 long nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); 102 return nr_dirty > 0 ? nr_dirty : 0; 103 } 104 105 /* 106 * Handle nr_inode sysctl 107 */ 108 #ifdef CONFIG_SYSCTL 109 int proc_nr_inodes(struct ctl_table *table, int write, 110 void __user *buffer, size_t *lenp, loff_t *ppos) 111 { 112 inodes_stat.nr_inodes = get_nr_inodes(); 113 inodes_stat.nr_unused = get_nr_inodes_unused(); 114 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 115 } 116 #endif 117 118 static int no_open(struct inode *inode, struct file *file) 119 { 120 return -ENXIO; 121 } 122 123 /** 124 * inode_init_always - perform inode structure initialisation 125 * @sb: superblock inode belongs to 126 * @inode: inode to initialise 127 * 128 * These are initializations that need to be done on every inode 129 * allocation as the fields are not initialised by slab allocation. 130 */ 131 int inode_init_always(struct super_block *sb, struct inode *inode) 132 { 133 static const struct inode_operations empty_iops; 134 static const struct file_operations no_open_fops = {.open = no_open}; 135 struct address_space *const mapping = &inode->i_data; 136 137 inode->i_sb = sb; 138 inode->i_blkbits = sb->s_blocksize_bits; 139 inode->i_flags = 0; 140 atomic_set(&inode->i_count, 1); 141 inode->i_op = &empty_iops; 142 inode->i_fop = &no_open_fops; 143 inode->__i_nlink = 1; 144 inode->i_opflags = 0; 145 if (sb->s_xattr) 146 inode->i_opflags |= IOP_XATTR; 147 i_uid_write(inode, 0); 148 i_gid_write(inode, 0); 149 atomic_set(&inode->i_writecount, 0); 150 inode->i_size = 0; 151 inode->i_write_hint = WRITE_LIFE_NOT_SET; 152 inode->i_blocks = 0; 153 inode->i_bytes = 0; 154 inode->i_generation = 0; 155 inode->i_pipe = NULL; 156 inode->i_bdev = NULL; 157 inode->i_cdev = NULL; 158 inode->i_link = NULL; 159 inode->i_dir_seq = 0; 160 inode->i_rdev = 0; 161 inode->dirtied_when = 0; 162 163 #ifdef CONFIG_CGROUP_WRITEBACK 164 inode->i_wb_frn_winner = 0; 165 inode->i_wb_frn_avg_time = 0; 166 inode->i_wb_frn_history = 0; 167 #endif 168 169 if (security_inode_alloc(inode)) 170 goto out; 171 spin_lock_init(&inode->i_lock); 172 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); 173 174 init_rwsem(&inode->i_rwsem); 175 lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); 176 177 atomic_set(&inode->i_dio_count, 0); 178 179 mapping->a_ops = &empty_aops; 180 mapping->host = inode; 181 mapping->flags = 0; 182 mapping->wb_err = 0; 183 atomic_set(&mapping->i_mmap_writable, 0); 184 #ifdef CONFIG_READ_ONLY_THP_FOR_FS 185 atomic_set(&mapping->nr_thps, 0); 186 #endif 187 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 188 mapping->private_data = NULL; 189 mapping->writeback_index = 0; 190 inode->i_private = NULL; 191 inode->i_mapping = mapping; 192 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ 193 #ifdef CONFIG_FS_POSIX_ACL 194 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; 195 #endif 196 197 #ifdef CONFIG_FSNOTIFY 198 inode->i_fsnotify_mask = 0; 199 #endif 200 inode->i_flctx = NULL; 201 this_cpu_inc(nr_inodes); 202 203 return 0; 204 out: 205 return -ENOMEM; 206 } 207 EXPORT_SYMBOL(inode_init_always); 208 209 void free_inode_nonrcu(struct inode *inode) 210 { 211 kmem_cache_free(inode_cachep, inode); 212 } 213 EXPORT_SYMBOL(free_inode_nonrcu); 214 215 static void i_callback(struct rcu_head *head) 216 { 217 struct inode *inode = container_of(head, struct inode, i_rcu); 218 if (inode->free_inode) 219 inode->free_inode(inode); 220 else 221 free_inode_nonrcu(inode); 222 } 223 224 static struct inode *alloc_inode(struct super_block *sb) 225 { 226 const struct super_operations *ops = sb->s_op; 227 struct inode *inode; 228 229 if (ops->alloc_inode) 230 inode = ops->alloc_inode(sb); 231 else 232 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); 233 234 if (!inode) 235 return NULL; 236 237 if (unlikely(inode_init_always(sb, inode))) { 238 if (ops->destroy_inode) { 239 ops->destroy_inode(inode); 240 if (!ops->free_inode) 241 return NULL; 242 } 243 inode->free_inode = ops->free_inode; 244 i_callback(&inode->i_rcu); 245 return NULL; 246 } 247 248 return inode; 249 } 250 251 void __destroy_inode(struct inode *inode) 252 { 253 BUG_ON(inode_has_buffers(inode)); 254 inode_detach_wb(inode); 255 security_inode_free(inode); 256 fsnotify_inode_delete(inode); 257 locks_free_lock_context(inode); 258 if (!inode->i_nlink) { 259 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); 260 atomic_long_dec(&inode->i_sb->s_remove_count); 261 } 262 263 #ifdef CONFIG_FS_POSIX_ACL 264 if (inode->i_acl && !is_uncached_acl(inode->i_acl)) 265 posix_acl_release(inode->i_acl); 266 if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl)) 267 posix_acl_release(inode->i_default_acl); 268 #endif 269 this_cpu_dec(nr_inodes); 270 } 271 EXPORT_SYMBOL(__destroy_inode); 272 273 static void destroy_inode(struct inode *inode) 274 { 275 const struct super_operations *ops = inode->i_sb->s_op; 276 277 BUG_ON(!list_empty(&inode->i_lru)); 278 __destroy_inode(inode); 279 if (ops->destroy_inode) { 280 ops->destroy_inode(inode); 281 if (!ops->free_inode) 282 return; 283 } 284 inode->free_inode = ops->free_inode; 285 call_rcu(&inode->i_rcu, i_callback); 286 } 287 288 /** 289 * drop_nlink - directly drop an inode's link count 290 * @inode: inode 291 * 292 * This is a low-level filesystem helper to replace any 293 * direct filesystem manipulation of i_nlink. In cases 294 * where we are attempting to track writes to the 295 * filesystem, a decrement to zero means an imminent 296 * write when the file is truncated and actually unlinked 297 * on the filesystem. 298 */ 299 void drop_nlink(struct inode *inode) 300 { 301 WARN_ON(inode->i_nlink == 0); 302 inode->__i_nlink--; 303 if (!inode->i_nlink) 304 atomic_long_inc(&inode->i_sb->s_remove_count); 305 } 306 EXPORT_SYMBOL(drop_nlink); 307 308 /** 309 * clear_nlink - directly zero an inode's link count 310 * @inode: inode 311 * 312 * This is a low-level filesystem helper to replace any 313 * direct filesystem manipulation of i_nlink. See 314 * drop_nlink() for why we care about i_nlink hitting zero. 315 */ 316 void clear_nlink(struct inode *inode) 317 { 318 if (inode->i_nlink) { 319 inode->__i_nlink = 0; 320 atomic_long_inc(&inode->i_sb->s_remove_count); 321 } 322 } 323 EXPORT_SYMBOL(clear_nlink); 324 325 /** 326 * set_nlink - directly set an inode's link count 327 * @inode: inode 328 * @nlink: new nlink (should be non-zero) 329 * 330 * This is a low-level filesystem helper to replace any 331 * direct filesystem manipulation of i_nlink. 332 */ 333 void set_nlink(struct inode *inode, unsigned int nlink) 334 { 335 if (!nlink) { 336 clear_nlink(inode); 337 } else { 338 /* Yes, some filesystems do change nlink from zero to one */ 339 if (inode->i_nlink == 0) 340 atomic_long_dec(&inode->i_sb->s_remove_count); 341 342 inode->__i_nlink = nlink; 343 } 344 } 345 EXPORT_SYMBOL(set_nlink); 346 347 /** 348 * inc_nlink - directly increment an inode's link count 349 * @inode: inode 350 * 351 * This is a low-level filesystem helper to replace any 352 * direct filesystem manipulation of i_nlink. Currently, 353 * it is only here for parity with dec_nlink(). 354 */ 355 void inc_nlink(struct inode *inode) 356 { 357 if (unlikely(inode->i_nlink == 0)) { 358 WARN_ON(!(inode->i_state & I_LINKABLE)); 359 atomic_long_dec(&inode->i_sb->s_remove_count); 360 } 361 362 inode->__i_nlink++; 363 } 364 EXPORT_SYMBOL(inc_nlink); 365 366 static void __address_space_init_once(struct address_space *mapping) 367 { 368 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT); 369 init_rwsem(&mapping->i_mmap_rwsem); 370 INIT_LIST_HEAD(&mapping->private_list); 371 spin_lock_init(&mapping->private_lock); 372 mapping->i_mmap = RB_ROOT_CACHED; 373 } 374 375 void address_space_init_once(struct address_space *mapping) 376 { 377 memset(mapping, 0, sizeof(*mapping)); 378 __address_space_init_once(mapping); 379 } 380 EXPORT_SYMBOL(address_space_init_once); 381 382 /* 383 * These are initializations that only need to be done 384 * once, because the fields are idempotent across use 385 * of the inode, so let the slab aware of that. 386 */ 387 void inode_init_once(struct inode *inode) 388 { 389 memset(inode, 0, sizeof(*inode)); 390 INIT_HLIST_NODE(&inode->i_hash); 391 INIT_LIST_HEAD(&inode->i_devices); 392 INIT_LIST_HEAD(&inode->i_io_list); 393 INIT_LIST_HEAD(&inode->i_wb_list); 394 INIT_LIST_HEAD(&inode->i_lru); 395 __address_space_init_once(&inode->i_data); 396 i_size_ordered_init(inode); 397 } 398 EXPORT_SYMBOL(inode_init_once); 399 400 static void init_once(void *foo) 401 { 402 struct inode *inode = (struct inode *) foo; 403 404 inode_init_once(inode); 405 } 406 407 /* 408 * inode->i_lock must be held 409 */ 410 void __iget(struct inode *inode) 411 { 412 atomic_inc(&inode->i_count); 413 } 414 415 /* 416 * get additional reference to inode; caller must already hold one. 417 */ 418 void ihold(struct inode *inode) 419 { 420 WARN_ON(atomic_inc_return(&inode->i_count) < 2); 421 } 422 EXPORT_SYMBOL(ihold); 423 424 static void inode_lru_list_add(struct inode *inode) 425 { 426 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) 427 this_cpu_inc(nr_unused); 428 else 429 inode->i_state |= I_REFERENCED; 430 } 431 432 /* 433 * Add inode to LRU if needed (inode is unused and clean). 434 * 435 * Needs inode->i_lock held. 436 */ 437 void inode_add_lru(struct inode *inode) 438 { 439 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | 440 I_FREEING | I_WILL_FREE)) && 441 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE) 442 inode_lru_list_add(inode); 443 } 444 445 446 static void inode_lru_list_del(struct inode *inode) 447 { 448 449 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) 450 this_cpu_dec(nr_unused); 451 } 452 453 /** 454 * inode_sb_list_add - add inode to the superblock list of inodes 455 * @inode: inode to add 456 */ 457 void inode_sb_list_add(struct inode *inode) 458 { 459 spin_lock(&inode->i_sb->s_inode_list_lock); 460 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); 461 spin_unlock(&inode->i_sb->s_inode_list_lock); 462 } 463 EXPORT_SYMBOL_GPL(inode_sb_list_add); 464 465 static inline void inode_sb_list_del(struct inode *inode) 466 { 467 if (!list_empty(&inode->i_sb_list)) { 468 spin_lock(&inode->i_sb->s_inode_list_lock); 469 list_del_init(&inode->i_sb_list); 470 spin_unlock(&inode->i_sb->s_inode_list_lock); 471 } 472 } 473 474 static unsigned long hash(struct super_block *sb, unsigned long hashval) 475 { 476 unsigned long tmp; 477 478 tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / 479 L1_CACHE_BYTES; 480 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift); 481 return tmp & i_hash_mask; 482 } 483 484 /** 485 * __insert_inode_hash - hash an inode 486 * @inode: unhashed inode 487 * @hashval: unsigned long value used to locate this object in the 488 * inode_hashtable. 489 * 490 * Add an inode to the inode hash for this superblock. 491 */ 492 void __insert_inode_hash(struct inode *inode, unsigned long hashval) 493 { 494 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval); 495 496 spin_lock(&inode_hash_lock); 497 spin_lock(&inode->i_lock); 498 hlist_add_head(&inode->i_hash, b); 499 spin_unlock(&inode->i_lock); 500 spin_unlock(&inode_hash_lock); 501 } 502 EXPORT_SYMBOL(__insert_inode_hash); 503 504 /** 505 * __remove_inode_hash - remove an inode from the hash 506 * @inode: inode to unhash 507 * 508 * Remove an inode from the superblock. 509 */ 510 void __remove_inode_hash(struct inode *inode) 511 { 512 spin_lock(&inode_hash_lock); 513 spin_lock(&inode->i_lock); 514 hlist_del_init(&inode->i_hash); 515 spin_unlock(&inode->i_lock); 516 spin_unlock(&inode_hash_lock); 517 } 518 EXPORT_SYMBOL(__remove_inode_hash); 519 520 void clear_inode(struct inode *inode) 521 { 522 /* 523 * We have to cycle the i_pages lock here because reclaim can be in the 524 * process of removing the last page (in __delete_from_page_cache()) 525 * and we must not free the mapping under it. 526 */ 527 xa_lock_irq(&inode->i_data.i_pages); 528 BUG_ON(inode->i_data.nrpages); 529 BUG_ON(inode->i_data.nrexceptional); 530 xa_unlock_irq(&inode->i_data.i_pages); 531 BUG_ON(!list_empty(&inode->i_data.private_list)); 532 BUG_ON(!(inode->i_state & I_FREEING)); 533 BUG_ON(inode->i_state & I_CLEAR); 534 BUG_ON(!list_empty(&inode->i_wb_list)); 535 /* don't need i_lock here, no concurrent mods to i_state */ 536 inode->i_state = I_FREEING | I_CLEAR; 537 } 538 EXPORT_SYMBOL(clear_inode); 539 540 /* 541 * Free the inode passed in, removing it from the lists it is still connected 542 * to. We remove any pages still attached to the inode and wait for any IO that 543 * is still in progress before finally destroying the inode. 544 * 545 * An inode must already be marked I_FREEING so that we avoid the inode being 546 * moved back onto lists if we race with other code that manipulates the lists 547 * (e.g. writeback_single_inode). The caller is responsible for setting this. 548 * 549 * An inode must already be removed from the LRU list before being evicted from 550 * the cache. This should occur atomically with setting the I_FREEING state 551 * flag, so no inodes here should ever be on the LRU when being evicted. 552 */ 553 static void evict(struct inode *inode) 554 { 555 const struct super_operations *op = inode->i_sb->s_op; 556 557 BUG_ON(!(inode->i_state & I_FREEING)); 558 BUG_ON(!list_empty(&inode->i_lru)); 559 560 if (!list_empty(&inode->i_io_list)) 561 inode_io_list_del(inode); 562 563 inode_sb_list_del(inode); 564 565 /* 566 * Wait for flusher thread to be done with the inode so that filesystem 567 * does not start destroying it while writeback is still running. Since 568 * the inode has I_FREEING set, flusher thread won't start new work on 569 * the inode. We just have to wait for running writeback to finish. 570 */ 571 inode_wait_for_writeback(inode); 572 573 if (op->evict_inode) { 574 op->evict_inode(inode); 575 } else { 576 truncate_inode_pages_final(&inode->i_data); 577 clear_inode(inode); 578 } 579 if (S_ISBLK(inode->i_mode) && inode->i_bdev) 580 bd_forget(inode); 581 if (S_ISCHR(inode->i_mode) && inode->i_cdev) 582 cd_forget(inode); 583 584 remove_inode_hash(inode); 585 586 spin_lock(&inode->i_lock); 587 wake_up_bit(&inode->i_state, __I_NEW); 588 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); 589 spin_unlock(&inode->i_lock); 590 591 destroy_inode(inode); 592 } 593 594 /* 595 * dispose_list - dispose of the contents of a local list 596 * @head: the head of the list to free 597 * 598 * Dispose-list gets a local list with local inodes in it, so it doesn't 599 * need to worry about list corruption and SMP locks. 600 */ 601 static void dispose_list(struct list_head *head) 602 { 603 while (!list_empty(head)) { 604 struct inode *inode; 605 606 inode = list_first_entry(head, struct inode, i_lru); 607 list_del_init(&inode->i_lru); 608 609 evict(inode); 610 cond_resched(); 611 } 612 } 613 614 /** 615 * evict_inodes - evict all evictable inodes for a superblock 616 * @sb: superblock to operate on 617 * 618 * Make sure that no inodes with zero refcount are retained. This is 619 * called by superblock shutdown after having SB_ACTIVE flag removed, 620 * so any inode reaching zero refcount during or after that call will 621 * be immediately evicted. 622 */ 623 void evict_inodes(struct super_block *sb) 624 { 625 struct inode *inode, *next; 626 LIST_HEAD(dispose); 627 628 again: 629 spin_lock(&sb->s_inode_list_lock); 630 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 631 if (atomic_read(&inode->i_count)) 632 continue; 633 634 spin_lock(&inode->i_lock); 635 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 636 spin_unlock(&inode->i_lock); 637 continue; 638 } 639 640 inode->i_state |= I_FREEING; 641 inode_lru_list_del(inode); 642 spin_unlock(&inode->i_lock); 643 list_add(&inode->i_lru, &dispose); 644 645 /* 646 * We can have a ton of inodes to evict at unmount time given 647 * enough memory, check to see if we need to go to sleep for a 648 * bit so we don't livelock. 649 */ 650 if (need_resched()) { 651 spin_unlock(&sb->s_inode_list_lock); 652 cond_resched(); 653 dispose_list(&dispose); 654 goto again; 655 } 656 } 657 spin_unlock(&sb->s_inode_list_lock); 658 659 dispose_list(&dispose); 660 } 661 EXPORT_SYMBOL_GPL(evict_inodes); 662 663 /** 664 * invalidate_inodes - attempt to free all inodes on a superblock 665 * @sb: superblock to operate on 666 * @kill_dirty: flag to guide handling of dirty inodes 667 * 668 * Attempts to free all inodes for a given superblock. If there were any 669 * busy inodes return a non-zero value, else zero. 670 * If @kill_dirty is set, discard dirty inodes too, otherwise treat 671 * them as busy. 672 */ 673 int invalidate_inodes(struct super_block *sb, bool kill_dirty) 674 { 675 int busy = 0; 676 struct inode *inode, *next; 677 LIST_HEAD(dispose); 678 679 again: 680 spin_lock(&sb->s_inode_list_lock); 681 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 682 spin_lock(&inode->i_lock); 683 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 684 spin_unlock(&inode->i_lock); 685 continue; 686 } 687 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) { 688 spin_unlock(&inode->i_lock); 689 busy = 1; 690 continue; 691 } 692 if (atomic_read(&inode->i_count)) { 693 spin_unlock(&inode->i_lock); 694 busy = 1; 695 continue; 696 } 697 698 inode->i_state |= I_FREEING; 699 inode_lru_list_del(inode); 700 spin_unlock(&inode->i_lock); 701 list_add(&inode->i_lru, &dispose); 702 if (need_resched()) { 703 spin_unlock(&sb->s_inode_list_lock); 704 cond_resched(); 705 dispose_list(&dispose); 706 goto again; 707 } 708 } 709 spin_unlock(&sb->s_inode_list_lock); 710 711 dispose_list(&dispose); 712 713 return busy; 714 } 715 716 /* 717 * Isolate the inode from the LRU in preparation for freeing it. 718 * 719 * Any inodes which are pinned purely because of attached pagecache have their 720 * pagecache removed. If the inode has metadata buffers attached to 721 * mapping->private_list then try to remove them. 722 * 723 * If the inode has the I_REFERENCED flag set, then it means that it has been 724 * used recently - the flag is set in iput_final(). When we encounter such an 725 * inode, clear the flag and move it to the back of the LRU so it gets another 726 * pass through the LRU before it gets reclaimed. This is necessary because of 727 * the fact we are doing lazy LRU updates to minimise lock contention so the 728 * LRU does not have strict ordering. Hence we don't want to reclaim inodes 729 * with this flag set because they are the inodes that are out of order. 730 */ 731 static enum lru_status inode_lru_isolate(struct list_head *item, 732 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 733 { 734 struct list_head *freeable = arg; 735 struct inode *inode = container_of(item, struct inode, i_lru); 736 737 /* 738 * we are inverting the lru lock/inode->i_lock here, so use a trylock. 739 * If we fail to get the lock, just skip it. 740 */ 741 if (!spin_trylock(&inode->i_lock)) 742 return LRU_SKIP; 743 744 /* 745 * Referenced or dirty inodes are still in use. Give them another pass 746 * through the LRU as we canot reclaim them now. 747 */ 748 if (atomic_read(&inode->i_count) || 749 (inode->i_state & ~I_REFERENCED)) { 750 list_lru_isolate(lru, &inode->i_lru); 751 spin_unlock(&inode->i_lock); 752 this_cpu_dec(nr_unused); 753 return LRU_REMOVED; 754 } 755 756 /* recently referenced inodes get one more pass */ 757 if (inode->i_state & I_REFERENCED) { 758 inode->i_state &= ~I_REFERENCED; 759 spin_unlock(&inode->i_lock); 760 return LRU_ROTATE; 761 } 762 763 if (inode_has_buffers(inode) || inode->i_data.nrpages) { 764 __iget(inode); 765 spin_unlock(&inode->i_lock); 766 spin_unlock(lru_lock); 767 if (remove_inode_buffers(inode)) { 768 unsigned long reap; 769 reap = invalidate_mapping_pages(&inode->i_data, 0, -1); 770 if (current_is_kswapd()) 771 __count_vm_events(KSWAPD_INODESTEAL, reap); 772 else 773 __count_vm_events(PGINODESTEAL, reap); 774 if (current->reclaim_state) 775 current->reclaim_state->reclaimed_slab += reap; 776 } 777 iput(inode); 778 spin_lock(lru_lock); 779 return LRU_RETRY; 780 } 781 782 WARN_ON(inode->i_state & I_NEW); 783 inode->i_state |= I_FREEING; 784 list_lru_isolate_move(lru, &inode->i_lru, freeable); 785 spin_unlock(&inode->i_lock); 786 787 this_cpu_dec(nr_unused); 788 return LRU_REMOVED; 789 } 790 791 /* 792 * Walk the superblock inode LRU for freeable inodes and attempt to free them. 793 * This is called from the superblock shrinker function with a number of inodes 794 * to trim from the LRU. Inodes to be freed are moved to a temporary list and 795 * then are freed outside inode_lock by dispose_list(). 796 */ 797 long prune_icache_sb(struct super_block *sb, struct shrink_control *sc) 798 { 799 LIST_HEAD(freeable); 800 long freed; 801 802 freed = list_lru_shrink_walk(&sb->s_inode_lru, sc, 803 inode_lru_isolate, &freeable); 804 dispose_list(&freeable); 805 return freed; 806 } 807 808 static void __wait_on_freeing_inode(struct inode *inode); 809 /* 810 * Called with the inode lock held. 811 */ 812 static struct inode *find_inode(struct super_block *sb, 813 struct hlist_head *head, 814 int (*test)(struct inode *, void *), 815 void *data) 816 { 817 struct inode *inode = NULL; 818 819 repeat: 820 hlist_for_each_entry(inode, head, i_hash) { 821 if (inode->i_sb != sb) 822 continue; 823 if (!test(inode, data)) 824 continue; 825 spin_lock(&inode->i_lock); 826 if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 827 __wait_on_freeing_inode(inode); 828 goto repeat; 829 } 830 if (unlikely(inode->i_state & I_CREATING)) { 831 spin_unlock(&inode->i_lock); 832 return ERR_PTR(-ESTALE); 833 } 834 __iget(inode); 835 spin_unlock(&inode->i_lock); 836 return inode; 837 } 838 return NULL; 839 } 840 841 /* 842 * find_inode_fast is the fast path version of find_inode, see the comment at 843 * iget_locked for details. 844 */ 845 static struct inode *find_inode_fast(struct super_block *sb, 846 struct hlist_head *head, unsigned long ino) 847 { 848 struct inode *inode = NULL; 849 850 repeat: 851 hlist_for_each_entry(inode, head, i_hash) { 852 if (inode->i_ino != ino) 853 continue; 854 if (inode->i_sb != sb) 855 continue; 856 spin_lock(&inode->i_lock); 857 if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 858 __wait_on_freeing_inode(inode); 859 goto repeat; 860 } 861 if (unlikely(inode->i_state & I_CREATING)) { 862 spin_unlock(&inode->i_lock); 863 return ERR_PTR(-ESTALE); 864 } 865 __iget(inode); 866 spin_unlock(&inode->i_lock); 867 return inode; 868 } 869 return NULL; 870 } 871 872 /* 873 * Each cpu owns a range of LAST_INO_BATCH numbers. 874 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations, 875 * to renew the exhausted range. 876 * 877 * This does not significantly increase overflow rate because every CPU can 878 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is 879 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the 880 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase 881 * overflow rate by 2x, which does not seem too significant. 882 * 883 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 884 * error if st_ino won't fit in target struct field. Use 32bit counter 885 * here to attempt to avoid that. 886 */ 887 #define LAST_INO_BATCH 1024 888 static DEFINE_PER_CPU(unsigned int, last_ino); 889 890 unsigned int get_next_ino(void) 891 { 892 unsigned int *p = &get_cpu_var(last_ino); 893 unsigned int res = *p; 894 895 #ifdef CONFIG_SMP 896 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { 897 static atomic_t shared_last_ino; 898 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino); 899 900 res = next - LAST_INO_BATCH; 901 } 902 #endif 903 904 res++; 905 /* get_next_ino should not provide a 0 inode number */ 906 if (unlikely(!res)) 907 res++; 908 *p = res; 909 put_cpu_var(last_ino); 910 return res; 911 } 912 EXPORT_SYMBOL(get_next_ino); 913 914 /** 915 * new_inode_pseudo - obtain an inode 916 * @sb: superblock 917 * 918 * Allocates a new inode for given superblock. 919 * Inode wont be chained in superblock s_inodes list 920 * This means : 921 * - fs can't be unmount 922 * - quotas, fsnotify, writeback can't work 923 */ 924 struct inode *new_inode_pseudo(struct super_block *sb) 925 { 926 struct inode *inode = alloc_inode(sb); 927 928 if (inode) { 929 spin_lock(&inode->i_lock); 930 inode->i_state = 0; 931 spin_unlock(&inode->i_lock); 932 INIT_LIST_HEAD(&inode->i_sb_list); 933 } 934 return inode; 935 } 936 937 /** 938 * new_inode - obtain an inode 939 * @sb: superblock 940 * 941 * Allocates a new inode for given superblock. The default gfp_mask 942 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE. 943 * If HIGHMEM pages are unsuitable or it is known that pages allocated 944 * for the page cache are not reclaimable or migratable, 945 * mapping_set_gfp_mask() must be called with suitable flags on the 946 * newly created inode's mapping 947 * 948 */ 949 struct inode *new_inode(struct super_block *sb) 950 { 951 struct inode *inode; 952 953 spin_lock_prefetch(&sb->s_inode_list_lock); 954 955 inode = new_inode_pseudo(sb); 956 if (inode) 957 inode_sb_list_add(inode); 958 return inode; 959 } 960 EXPORT_SYMBOL(new_inode); 961 962 #ifdef CONFIG_DEBUG_LOCK_ALLOC 963 void lockdep_annotate_inode_mutex_key(struct inode *inode) 964 { 965 if (S_ISDIR(inode->i_mode)) { 966 struct file_system_type *type = inode->i_sb->s_type; 967 968 /* Set new key only if filesystem hasn't already changed it */ 969 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) { 970 /* 971 * ensure nobody is actually holding i_mutex 972 */ 973 // mutex_destroy(&inode->i_mutex); 974 init_rwsem(&inode->i_rwsem); 975 lockdep_set_class(&inode->i_rwsem, 976 &type->i_mutex_dir_key); 977 } 978 } 979 } 980 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); 981 #endif 982 983 /** 984 * unlock_new_inode - clear the I_NEW state and wake up any waiters 985 * @inode: new inode to unlock 986 * 987 * Called when the inode is fully initialised to clear the new state of the 988 * inode and wake up anyone waiting for the inode to finish initialisation. 989 */ 990 void unlock_new_inode(struct inode *inode) 991 { 992 lockdep_annotate_inode_mutex_key(inode); 993 spin_lock(&inode->i_lock); 994 WARN_ON(!(inode->i_state & I_NEW)); 995 inode->i_state &= ~I_NEW & ~I_CREATING; 996 smp_mb(); 997 wake_up_bit(&inode->i_state, __I_NEW); 998 spin_unlock(&inode->i_lock); 999 } 1000 EXPORT_SYMBOL(unlock_new_inode); 1001 1002 void discard_new_inode(struct inode *inode) 1003 { 1004 lockdep_annotate_inode_mutex_key(inode); 1005 spin_lock(&inode->i_lock); 1006 WARN_ON(!(inode->i_state & I_NEW)); 1007 inode->i_state &= ~I_NEW; 1008 smp_mb(); 1009 wake_up_bit(&inode->i_state, __I_NEW); 1010 spin_unlock(&inode->i_lock); 1011 iput(inode); 1012 } 1013 EXPORT_SYMBOL(discard_new_inode); 1014 1015 /** 1016 * lock_two_nondirectories - take two i_mutexes on non-directory objects 1017 * 1018 * Lock any non-NULL argument that is not a directory. 1019 * Zero, one or two objects may be locked by this function. 1020 * 1021 * @inode1: first inode to lock 1022 * @inode2: second inode to lock 1023 */ 1024 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2) 1025 { 1026 if (inode1 > inode2) 1027 swap(inode1, inode2); 1028 1029 if (inode1 && !S_ISDIR(inode1->i_mode)) 1030 inode_lock(inode1); 1031 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) 1032 inode_lock_nested(inode2, I_MUTEX_NONDIR2); 1033 } 1034 EXPORT_SYMBOL(lock_two_nondirectories); 1035 1036 /** 1037 * unlock_two_nondirectories - release locks from lock_two_nondirectories() 1038 * @inode1: first inode to unlock 1039 * @inode2: second inode to unlock 1040 */ 1041 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2) 1042 { 1043 if (inode1 && !S_ISDIR(inode1->i_mode)) 1044 inode_unlock(inode1); 1045 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) 1046 inode_unlock(inode2); 1047 } 1048 EXPORT_SYMBOL(unlock_two_nondirectories); 1049 1050 /** 1051 * inode_insert5 - obtain an inode from a mounted file system 1052 * @inode: pre-allocated inode to use for insert to cache 1053 * @hashval: hash value (usually inode number) to get 1054 * @test: callback used for comparisons between inodes 1055 * @set: callback used to initialize a new struct inode 1056 * @data: opaque data pointer to pass to @test and @set 1057 * 1058 * Search for the inode specified by @hashval and @data in the inode cache, 1059 * and if present it is return it with an increased reference count. This is 1060 * a variant of iget5_locked() for callers that don't want to fail on memory 1061 * allocation of inode. 1062 * 1063 * If the inode is not in cache, insert the pre-allocated inode to cache and 1064 * return it locked, hashed, and with the I_NEW flag set. The file system gets 1065 * to fill it in before unlocking it via unlock_new_inode(). 1066 * 1067 * Note both @test and @set are called with the inode_hash_lock held, so can't 1068 * sleep. 1069 */ 1070 struct inode *inode_insert5(struct inode *inode, unsigned long hashval, 1071 int (*test)(struct inode *, void *), 1072 int (*set)(struct inode *, void *), void *data) 1073 { 1074 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval); 1075 struct inode *old; 1076 bool creating = inode->i_state & I_CREATING; 1077 1078 again: 1079 spin_lock(&inode_hash_lock); 1080 old = find_inode(inode->i_sb, head, test, data); 1081 if (unlikely(old)) { 1082 /* 1083 * Uhhuh, somebody else created the same inode under us. 1084 * Use the old inode instead of the preallocated one. 1085 */ 1086 spin_unlock(&inode_hash_lock); 1087 if (IS_ERR(old)) 1088 return NULL; 1089 wait_on_inode(old); 1090 if (unlikely(inode_unhashed(old))) { 1091 iput(old); 1092 goto again; 1093 } 1094 return old; 1095 } 1096 1097 if (set && unlikely(set(inode, data))) { 1098 inode = NULL; 1099 goto unlock; 1100 } 1101 1102 /* 1103 * Return the locked inode with I_NEW set, the 1104 * caller is responsible for filling in the contents 1105 */ 1106 spin_lock(&inode->i_lock); 1107 inode->i_state |= I_NEW; 1108 hlist_add_head(&inode->i_hash, head); 1109 spin_unlock(&inode->i_lock); 1110 if (!creating) 1111 inode_sb_list_add(inode); 1112 unlock: 1113 spin_unlock(&inode_hash_lock); 1114 1115 return inode; 1116 } 1117 EXPORT_SYMBOL(inode_insert5); 1118 1119 /** 1120 * iget5_locked - obtain an inode from a mounted file system 1121 * @sb: super block of file system 1122 * @hashval: hash value (usually inode number) to get 1123 * @test: callback used for comparisons between inodes 1124 * @set: callback used to initialize a new struct inode 1125 * @data: opaque data pointer to pass to @test and @set 1126 * 1127 * Search for the inode specified by @hashval and @data in the inode cache, 1128 * and if present it is return it with an increased reference count. This is 1129 * a generalized version of iget_locked() for file systems where the inode 1130 * number is not sufficient for unique identification of an inode. 1131 * 1132 * If the inode is not in cache, allocate a new inode and return it locked, 1133 * hashed, and with the I_NEW flag set. The file system gets to fill it in 1134 * before unlocking it via unlock_new_inode(). 1135 * 1136 * Note both @test and @set are called with the inode_hash_lock held, so can't 1137 * sleep. 1138 */ 1139 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, 1140 int (*test)(struct inode *, void *), 1141 int (*set)(struct inode *, void *), void *data) 1142 { 1143 struct inode *inode = ilookup5(sb, hashval, test, data); 1144 1145 if (!inode) { 1146 struct inode *new = alloc_inode(sb); 1147 1148 if (new) { 1149 new->i_state = 0; 1150 inode = inode_insert5(new, hashval, test, set, data); 1151 if (unlikely(inode != new)) 1152 destroy_inode(new); 1153 } 1154 } 1155 return inode; 1156 } 1157 EXPORT_SYMBOL(iget5_locked); 1158 1159 /** 1160 * iget_locked - obtain an inode from a mounted file system 1161 * @sb: super block of file system 1162 * @ino: inode number to get 1163 * 1164 * Search for the inode specified by @ino in the inode cache and if present 1165 * return it with an increased reference count. This is for file systems 1166 * where the inode number is sufficient for unique identification of an inode. 1167 * 1168 * If the inode is not in cache, allocate a new inode and return it locked, 1169 * hashed, and with the I_NEW flag set. The file system gets to fill it in 1170 * before unlocking it via unlock_new_inode(). 1171 */ 1172 struct inode *iget_locked(struct super_block *sb, unsigned long ino) 1173 { 1174 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1175 struct inode *inode; 1176 again: 1177 spin_lock(&inode_hash_lock); 1178 inode = find_inode_fast(sb, head, ino); 1179 spin_unlock(&inode_hash_lock); 1180 if (inode) { 1181 if (IS_ERR(inode)) 1182 return NULL; 1183 wait_on_inode(inode); 1184 if (unlikely(inode_unhashed(inode))) { 1185 iput(inode); 1186 goto again; 1187 } 1188 return inode; 1189 } 1190 1191 inode = alloc_inode(sb); 1192 if (inode) { 1193 struct inode *old; 1194 1195 spin_lock(&inode_hash_lock); 1196 /* We released the lock, so.. */ 1197 old = find_inode_fast(sb, head, ino); 1198 if (!old) { 1199 inode->i_ino = ino; 1200 spin_lock(&inode->i_lock); 1201 inode->i_state = I_NEW; 1202 hlist_add_head(&inode->i_hash, head); 1203 spin_unlock(&inode->i_lock); 1204 inode_sb_list_add(inode); 1205 spin_unlock(&inode_hash_lock); 1206 1207 /* Return the locked inode with I_NEW set, the 1208 * caller is responsible for filling in the contents 1209 */ 1210 return inode; 1211 } 1212 1213 /* 1214 * Uhhuh, somebody else created the same inode under 1215 * us. Use the old inode instead of the one we just 1216 * allocated. 1217 */ 1218 spin_unlock(&inode_hash_lock); 1219 destroy_inode(inode); 1220 if (IS_ERR(old)) 1221 return NULL; 1222 inode = old; 1223 wait_on_inode(inode); 1224 if (unlikely(inode_unhashed(inode))) { 1225 iput(inode); 1226 goto again; 1227 } 1228 } 1229 return inode; 1230 } 1231 EXPORT_SYMBOL(iget_locked); 1232 1233 /* 1234 * search the inode cache for a matching inode number. 1235 * If we find one, then the inode number we are trying to 1236 * allocate is not unique and so we should not use it. 1237 * 1238 * Returns 1 if the inode number is unique, 0 if it is not. 1239 */ 1240 static int test_inode_iunique(struct super_block *sb, unsigned long ino) 1241 { 1242 struct hlist_head *b = inode_hashtable + hash(sb, ino); 1243 struct inode *inode; 1244 1245 spin_lock(&inode_hash_lock); 1246 hlist_for_each_entry(inode, b, i_hash) { 1247 if (inode->i_ino == ino && inode->i_sb == sb) { 1248 spin_unlock(&inode_hash_lock); 1249 return 0; 1250 } 1251 } 1252 spin_unlock(&inode_hash_lock); 1253 1254 return 1; 1255 } 1256 1257 /** 1258 * iunique - get a unique inode number 1259 * @sb: superblock 1260 * @max_reserved: highest reserved inode number 1261 * 1262 * Obtain an inode number that is unique on the system for a given 1263 * superblock. This is used by file systems that have no natural 1264 * permanent inode numbering system. An inode number is returned that 1265 * is higher than the reserved limit but unique. 1266 * 1267 * BUGS: 1268 * With a large number of inodes live on the file system this function 1269 * currently becomes quite slow. 1270 */ 1271 ino_t iunique(struct super_block *sb, ino_t max_reserved) 1272 { 1273 /* 1274 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 1275 * error if st_ino won't fit in target struct field. Use 32bit counter 1276 * here to attempt to avoid that. 1277 */ 1278 static DEFINE_SPINLOCK(iunique_lock); 1279 static unsigned int counter; 1280 ino_t res; 1281 1282 spin_lock(&iunique_lock); 1283 do { 1284 if (counter <= max_reserved) 1285 counter = max_reserved + 1; 1286 res = counter++; 1287 } while (!test_inode_iunique(sb, res)); 1288 spin_unlock(&iunique_lock); 1289 1290 return res; 1291 } 1292 EXPORT_SYMBOL(iunique); 1293 1294 struct inode *igrab(struct inode *inode) 1295 { 1296 spin_lock(&inode->i_lock); 1297 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) { 1298 __iget(inode); 1299 spin_unlock(&inode->i_lock); 1300 } else { 1301 spin_unlock(&inode->i_lock); 1302 /* 1303 * Handle the case where s_op->clear_inode is not been 1304 * called yet, and somebody is calling igrab 1305 * while the inode is getting freed. 1306 */ 1307 inode = NULL; 1308 } 1309 return inode; 1310 } 1311 EXPORT_SYMBOL(igrab); 1312 1313 /** 1314 * ilookup5_nowait - search for an inode in the inode cache 1315 * @sb: super block of file system to search 1316 * @hashval: hash value (usually inode number) to search for 1317 * @test: callback used for comparisons between inodes 1318 * @data: opaque data pointer to pass to @test 1319 * 1320 * Search for the inode specified by @hashval and @data in the inode cache. 1321 * If the inode is in the cache, the inode is returned with an incremented 1322 * reference count. 1323 * 1324 * Note: I_NEW is not waited upon so you have to be very careful what you do 1325 * with the returned inode. You probably should be using ilookup5() instead. 1326 * 1327 * Note2: @test is called with the inode_hash_lock held, so can't sleep. 1328 */ 1329 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, 1330 int (*test)(struct inode *, void *), void *data) 1331 { 1332 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1333 struct inode *inode; 1334 1335 spin_lock(&inode_hash_lock); 1336 inode = find_inode(sb, head, test, data); 1337 spin_unlock(&inode_hash_lock); 1338 1339 return IS_ERR(inode) ? NULL : inode; 1340 } 1341 EXPORT_SYMBOL(ilookup5_nowait); 1342 1343 /** 1344 * ilookup5 - search for an inode in the inode cache 1345 * @sb: super block of file system to search 1346 * @hashval: hash value (usually inode number) to search for 1347 * @test: callback used for comparisons between inodes 1348 * @data: opaque data pointer to pass to @test 1349 * 1350 * Search for the inode specified by @hashval and @data in the inode cache, 1351 * and if the inode is in the cache, return the inode with an incremented 1352 * reference count. Waits on I_NEW before returning the inode. 1353 * returned with an incremented reference count. 1354 * 1355 * This is a generalized version of ilookup() for file systems where the 1356 * inode number is not sufficient for unique identification of an inode. 1357 * 1358 * Note: @test is called with the inode_hash_lock held, so can't sleep. 1359 */ 1360 struct inode *ilookup5(struct super_block *sb, unsigned long hashval, 1361 int (*test)(struct inode *, void *), void *data) 1362 { 1363 struct inode *inode; 1364 again: 1365 inode = ilookup5_nowait(sb, hashval, test, data); 1366 if (inode) { 1367 wait_on_inode(inode); 1368 if (unlikely(inode_unhashed(inode))) { 1369 iput(inode); 1370 goto again; 1371 } 1372 } 1373 return inode; 1374 } 1375 EXPORT_SYMBOL(ilookup5); 1376 1377 /** 1378 * ilookup - search for an inode in the inode cache 1379 * @sb: super block of file system to search 1380 * @ino: inode number to search for 1381 * 1382 * Search for the inode @ino in the inode cache, and if the inode is in the 1383 * cache, the inode is returned with an incremented reference count. 1384 */ 1385 struct inode *ilookup(struct super_block *sb, unsigned long ino) 1386 { 1387 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1388 struct inode *inode; 1389 again: 1390 spin_lock(&inode_hash_lock); 1391 inode = find_inode_fast(sb, head, ino); 1392 spin_unlock(&inode_hash_lock); 1393 1394 if (inode) { 1395 if (IS_ERR(inode)) 1396 return NULL; 1397 wait_on_inode(inode); 1398 if (unlikely(inode_unhashed(inode))) { 1399 iput(inode); 1400 goto again; 1401 } 1402 } 1403 return inode; 1404 } 1405 EXPORT_SYMBOL(ilookup); 1406 1407 /** 1408 * find_inode_nowait - find an inode in the inode cache 1409 * @sb: super block of file system to search 1410 * @hashval: hash value (usually inode number) to search for 1411 * @match: callback used for comparisons between inodes 1412 * @data: opaque data pointer to pass to @match 1413 * 1414 * Search for the inode specified by @hashval and @data in the inode 1415 * cache, where the helper function @match will return 0 if the inode 1416 * does not match, 1 if the inode does match, and -1 if the search 1417 * should be stopped. The @match function must be responsible for 1418 * taking the i_lock spin_lock and checking i_state for an inode being 1419 * freed or being initialized, and incrementing the reference count 1420 * before returning 1. It also must not sleep, since it is called with 1421 * the inode_hash_lock spinlock held. 1422 * 1423 * This is a even more generalized version of ilookup5() when the 1424 * function must never block --- find_inode() can block in 1425 * __wait_on_freeing_inode() --- or when the caller can not increment 1426 * the reference count because the resulting iput() might cause an 1427 * inode eviction. The tradeoff is that the @match funtion must be 1428 * very carefully implemented. 1429 */ 1430 struct inode *find_inode_nowait(struct super_block *sb, 1431 unsigned long hashval, 1432 int (*match)(struct inode *, unsigned long, 1433 void *), 1434 void *data) 1435 { 1436 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1437 struct inode *inode, *ret_inode = NULL; 1438 int mval; 1439 1440 spin_lock(&inode_hash_lock); 1441 hlist_for_each_entry(inode, head, i_hash) { 1442 if (inode->i_sb != sb) 1443 continue; 1444 mval = match(inode, hashval, data); 1445 if (mval == 0) 1446 continue; 1447 if (mval == 1) 1448 ret_inode = inode; 1449 goto out; 1450 } 1451 out: 1452 spin_unlock(&inode_hash_lock); 1453 return ret_inode; 1454 } 1455 EXPORT_SYMBOL(find_inode_nowait); 1456 1457 int insert_inode_locked(struct inode *inode) 1458 { 1459 struct super_block *sb = inode->i_sb; 1460 ino_t ino = inode->i_ino; 1461 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1462 1463 while (1) { 1464 struct inode *old = NULL; 1465 spin_lock(&inode_hash_lock); 1466 hlist_for_each_entry(old, head, i_hash) { 1467 if (old->i_ino != ino) 1468 continue; 1469 if (old->i_sb != sb) 1470 continue; 1471 spin_lock(&old->i_lock); 1472 if (old->i_state & (I_FREEING|I_WILL_FREE)) { 1473 spin_unlock(&old->i_lock); 1474 continue; 1475 } 1476 break; 1477 } 1478 if (likely(!old)) { 1479 spin_lock(&inode->i_lock); 1480 inode->i_state |= I_NEW | I_CREATING; 1481 hlist_add_head(&inode->i_hash, head); 1482 spin_unlock(&inode->i_lock); 1483 spin_unlock(&inode_hash_lock); 1484 return 0; 1485 } 1486 if (unlikely(old->i_state & I_CREATING)) { 1487 spin_unlock(&old->i_lock); 1488 spin_unlock(&inode_hash_lock); 1489 return -EBUSY; 1490 } 1491 __iget(old); 1492 spin_unlock(&old->i_lock); 1493 spin_unlock(&inode_hash_lock); 1494 wait_on_inode(old); 1495 if (unlikely(!inode_unhashed(old))) { 1496 iput(old); 1497 return -EBUSY; 1498 } 1499 iput(old); 1500 } 1501 } 1502 EXPORT_SYMBOL(insert_inode_locked); 1503 1504 int insert_inode_locked4(struct inode *inode, unsigned long hashval, 1505 int (*test)(struct inode *, void *), void *data) 1506 { 1507 struct inode *old; 1508 1509 inode->i_state |= I_CREATING; 1510 old = inode_insert5(inode, hashval, test, NULL, data); 1511 1512 if (old != inode) { 1513 iput(old); 1514 return -EBUSY; 1515 } 1516 return 0; 1517 } 1518 EXPORT_SYMBOL(insert_inode_locked4); 1519 1520 1521 int generic_delete_inode(struct inode *inode) 1522 { 1523 return 1; 1524 } 1525 EXPORT_SYMBOL(generic_delete_inode); 1526 1527 /* 1528 * Called when we're dropping the last reference 1529 * to an inode. 1530 * 1531 * Call the FS "drop_inode()" function, defaulting to 1532 * the legacy UNIX filesystem behaviour. If it tells 1533 * us to evict inode, do so. Otherwise, retain inode 1534 * in cache if fs is alive, sync and evict if fs is 1535 * shutting down. 1536 */ 1537 static void iput_final(struct inode *inode) 1538 { 1539 struct super_block *sb = inode->i_sb; 1540 const struct super_operations *op = inode->i_sb->s_op; 1541 int drop; 1542 1543 WARN_ON(inode->i_state & I_NEW); 1544 1545 if (op->drop_inode) 1546 drop = op->drop_inode(inode); 1547 else 1548 drop = generic_drop_inode(inode); 1549 1550 if (!drop && (sb->s_flags & SB_ACTIVE)) { 1551 inode_add_lru(inode); 1552 spin_unlock(&inode->i_lock); 1553 return; 1554 } 1555 1556 if (!drop) { 1557 inode->i_state |= I_WILL_FREE; 1558 spin_unlock(&inode->i_lock); 1559 write_inode_now(inode, 1); 1560 spin_lock(&inode->i_lock); 1561 WARN_ON(inode->i_state & I_NEW); 1562 inode->i_state &= ~I_WILL_FREE; 1563 } 1564 1565 inode->i_state |= I_FREEING; 1566 if (!list_empty(&inode->i_lru)) 1567 inode_lru_list_del(inode); 1568 spin_unlock(&inode->i_lock); 1569 1570 evict(inode); 1571 } 1572 1573 /** 1574 * iput - put an inode 1575 * @inode: inode to put 1576 * 1577 * Puts an inode, dropping its usage count. If the inode use count hits 1578 * zero, the inode is then freed and may also be destroyed. 1579 * 1580 * Consequently, iput() can sleep. 1581 */ 1582 void iput(struct inode *inode) 1583 { 1584 if (!inode) 1585 return; 1586 BUG_ON(inode->i_state & I_CLEAR); 1587 retry: 1588 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) { 1589 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) { 1590 atomic_inc(&inode->i_count); 1591 spin_unlock(&inode->i_lock); 1592 trace_writeback_lazytime_iput(inode); 1593 mark_inode_dirty_sync(inode); 1594 goto retry; 1595 } 1596 iput_final(inode); 1597 } 1598 } 1599 EXPORT_SYMBOL(iput); 1600 1601 /** 1602 * bmap - find a block number in a file 1603 * @inode: inode of file 1604 * @block: block to find 1605 * 1606 * Returns the block number on the device holding the inode that 1607 * is the disk block number for the block of the file requested. 1608 * That is, asked for block 4 of inode 1 the function will return the 1609 * disk block relative to the disk start that holds that block of the 1610 * file. 1611 */ 1612 sector_t bmap(struct inode *inode, sector_t block) 1613 { 1614 sector_t res = 0; 1615 if (inode->i_mapping->a_ops->bmap) 1616 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block); 1617 return res; 1618 } 1619 EXPORT_SYMBOL(bmap); 1620 1621 /* 1622 * With relative atime, only update atime if the previous atime is 1623 * earlier than either the ctime or mtime or if at least a day has 1624 * passed since the last atime update. 1625 */ 1626 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, 1627 struct timespec64 now) 1628 { 1629 1630 if (!(mnt->mnt_flags & MNT_RELATIME)) 1631 return 1; 1632 /* 1633 * Is mtime younger than atime? If yes, update atime: 1634 */ 1635 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0) 1636 return 1; 1637 /* 1638 * Is ctime younger than atime? If yes, update atime: 1639 */ 1640 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0) 1641 return 1; 1642 1643 /* 1644 * Is the previous atime value older than a day? If yes, 1645 * update atime: 1646 */ 1647 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60) 1648 return 1; 1649 /* 1650 * Good, we can skip the atime update: 1651 */ 1652 return 0; 1653 } 1654 1655 int generic_update_time(struct inode *inode, struct timespec64 *time, int flags) 1656 { 1657 int iflags = I_DIRTY_TIME; 1658 bool dirty = false; 1659 1660 if (flags & S_ATIME) 1661 inode->i_atime = *time; 1662 if (flags & S_VERSION) 1663 dirty = inode_maybe_inc_iversion(inode, false); 1664 if (flags & S_CTIME) 1665 inode->i_ctime = *time; 1666 if (flags & S_MTIME) 1667 inode->i_mtime = *time; 1668 if ((flags & (S_ATIME | S_CTIME | S_MTIME)) && 1669 !(inode->i_sb->s_flags & SB_LAZYTIME)) 1670 dirty = true; 1671 1672 if (dirty) 1673 iflags |= I_DIRTY_SYNC; 1674 __mark_inode_dirty(inode, iflags); 1675 return 0; 1676 } 1677 EXPORT_SYMBOL(generic_update_time); 1678 1679 /* 1680 * This does the actual work of updating an inodes time or version. Must have 1681 * had called mnt_want_write() before calling this. 1682 */ 1683 static int update_time(struct inode *inode, struct timespec64 *time, int flags) 1684 { 1685 int (*update_time)(struct inode *, struct timespec64 *, int); 1686 1687 update_time = inode->i_op->update_time ? inode->i_op->update_time : 1688 generic_update_time; 1689 1690 return update_time(inode, time, flags); 1691 } 1692 1693 /** 1694 * touch_atime - update the access time 1695 * @path: the &struct path to update 1696 * @inode: inode to update 1697 * 1698 * Update the accessed time on an inode and mark it for writeback. 1699 * This function automatically handles read only file systems and media, 1700 * as well as the "noatime" flag and inode specific "noatime" markers. 1701 */ 1702 bool atime_needs_update(const struct path *path, struct inode *inode) 1703 { 1704 struct vfsmount *mnt = path->mnt; 1705 struct timespec64 now; 1706 1707 if (inode->i_flags & S_NOATIME) 1708 return false; 1709 1710 /* Atime updates will likely cause i_uid and i_gid to be written 1711 * back improprely if their true value is unknown to the vfs. 1712 */ 1713 if (HAS_UNMAPPED_ID(inode)) 1714 return false; 1715 1716 if (IS_NOATIME(inode)) 1717 return false; 1718 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)) 1719 return false; 1720 1721 if (mnt->mnt_flags & MNT_NOATIME) 1722 return false; 1723 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)) 1724 return false; 1725 1726 now = current_time(inode); 1727 1728 if (!relatime_need_update(mnt, inode, now)) 1729 return false; 1730 1731 if (timespec64_equal(&inode->i_atime, &now)) 1732 return false; 1733 1734 return true; 1735 } 1736 1737 void touch_atime(const struct path *path) 1738 { 1739 struct vfsmount *mnt = path->mnt; 1740 struct inode *inode = d_inode(path->dentry); 1741 struct timespec64 now; 1742 1743 if (!atime_needs_update(path, inode)) 1744 return; 1745 1746 if (!sb_start_write_trylock(inode->i_sb)) 1747 return; 1748 1749 if (__mnt_want_write(mnt) != 0) 1750 goto skip_update; 1751 /* 1752 * File systems can error out when updating inodes if they need to 1753 * allocate new space to modify an inode (such is the case for 1754 * Btrfs), but since we touch atime while walking down the path we 1755 * really don't care if we failed to update the atime of the file, 1756 * so just ignore the return value. 1757 * We may also fail on filesystems that have the ability to make parts 1758 * of the fs read only, e.g. subvolumes in Btrfs. 1759 */ 1760 now = current_time(inode); 1761 update_time(inode, &now, S_ATIME); 1762 __mnt_drop_write(mnt); 1763 skip_update: 1764 sb_end_write(inode->i_sb); 1765 } 1766 EXPORT_SYMBOL(touch_atime); 1767 1768 /* 1769 * The logic we want is 1770 * 1771 * if suid or (sgid and xgrp) 1772 * remove privs 1773 */ 1774 int should_remove_suid(struct dentry *dentry) 1775 { 1776 umode_t mode = d_inode(dentry)->i_mode; 1777 int kill = 0; 1778 1779 /* suid always must be killed */ 1780 if (unlikely(mode & S_ISUID)) 1781 kill = ATTR_KILL_SUID; 1782 1783 /* 1784 * sgid without any exec bits is just a mandatory locking mark; leave 1785 * it alone. If some exec bits are set, it's a real sgid; kill it. 1786 */ 1787 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) 1788 kill |= ATTR_KILL_SGID; 1789 1790 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) 1791 return kill; 1792 1793 return 0; 1794 } 1795 EXPORT_SYMBOL(should_remove_suid); 1796 1797 /* 1798 * Return mask of changes for notify_change() that need to be done as a 1799 * response to write or truncate. Return 0 if nothing has to be changed. 1800 * Negative value on error (change should be denied). 1801 */ 1802 int dentry_needs_remove_privs(struct dentry *dentry) 1803 { 1804 struct inode *inode = d_inode(dentry); 1805 int mask = 0; 1806 int ret; 1807 1808 if (IS_NOSEC(inode)) 1809 return 0; 1810 1811 mask = should_remove_suid(dentry); 1812 ret = security_inode_need_killpriv(dentry); 1813 if (ret < 0) 1814 return ret; 1815 if (ret) 1816 mask |= ATTR_KILL_PRIV; 1817 return mask; 1818 } 1819 1820 static int __remove_privs(struct dentry *dentry, int kill) 1821 { 1822 struct iattr newattrs; 1823 1824 newattrs.ia_valid = ATTR_FORCE | kill; 1825 /* 1826 * Note we call this on write, so notify_change will not 1827 * encounter any conflicting delegations: 1828 */ 1829 return notify_change(dentry, &newattrs, NULL); 1830 } 1831 1832 /* 1833 * Remove special file priviledges (suid, capabilities) when file is written 1834 * to or truncated. 1835 */ 1836 int file_remove_privs(struct file *file) 1837 { 1838 struct dentry *dentry = file_dentry(file); 1839 struct inode *inode = file_inode(file); 1840 int kill; 1841 int error = 0; 1842 1843 /* 1844 * Fast path for nothing security related. 1845 * As well for non-regular files, e.g. blkdev inodes. 1846 * For example, blkdev_write_iter() might get here 1847 * trying to remove privs which it is not allowed to. 1848 */ 1849 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode)) 1850 return 0; 1851 1852 kill = dentry_needs_remove_privs(dentry); 1853 if (kill < 0) 1854 return kill; 1855 if (kill) 1856 error = __remove_privs(dentry, kill); 1857 if (!error) 1858 inode_has_no_xattr(inode); 1859 1860 return error; 1861 } 1862 EXPORT_SYMBOL(file_remove_privs); 1863 1864 /** 1865 * file_update_time - update mtime and ctime time 1866 * @file: file accessed 1867 * 1868 * Update the mtime and ctime members of an inode and mark the inode 1869 * for writeback. Note that this function is meant exclusively for 1870 * usage in the file write path of filesystems, and filesystems may 1871 * choose to explicitly ignore update via this function with the 1872 * S_NOCMTIME inode flag, e.g. for network filesystem where these 1873 * timestamps are handled by the server. This can return an error for 1874 * file systems who need to allocate space in order to update an inode. 1875 */ 1876 1877 int file_update_time(struct file *file) 1878 { 1879 struct inode *inode = file_inode(file); 1880 struct timespec64 now; 1881 int sync_it = 0; 1882 int ret; 1883 1884 /* First try to exhaust all avenues to not sync */ 1885 if (IS_NOCMTIME(inode)) 1886 return 0; 1887 1888 now = current_time(inode); 1889 if (!timespec64_equal(&inode->i_mtime, &now)) 1890 sync_it = S_MTIME; 1891 1892 if (!timespec64_equal(&inode->i_ctime, &now)) 1893 sync_it |= S_CTIME; 1894 1895 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode)) 1896 sync_it |= S_VERSION; 1897 1898 if (!sync_it) 1899 return 0; 1900 1901 /* Finally allowed to write? Takes lock. */ 1902 if (__mnt_want_write_file(file)) 1903 return 0; 1904 1905 ret = update_time(inode, &now, sync_it); 1906 __mnt_drop_write_file(file); 1907 1908 return ret; 1909 } 1910 EXPORT_SYMBOL(file_update_time); 1911 1912 /* Caller must hold the file's inode lock */ 1913 int file_modified(struct file *file) 1914 { 1915 int err; 1916 1917 /* 1918 * Clear the security bits if the process is not being run by root. 1919 * This keeps people from modifying setuid and setgid binaries. 1920 */ 1921 err = file_remove_privs(file); 1922 if (err) 1923 return err; 1924 1925 if (unlikely(file->f_mode & FMODE_NOCMTIME)) 1926 return 0; 1927 1928 return file_update_time(file); 1929 } 1930 EXPORT_SYMBOL(file_modified); 1931 1932 int inode_needs_sync(struct inode *inode) 1933 { 1934 if (IS_SYNC(inode)) 1935 return 1; 1936 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) 1937 return 1; 1938 return 0; 1939 } 1940 EXPORT_SYMBOL(inode_needs_sync); 1941 1942 /* 1943 * If we try to find an inode in the inode hash while it is being 1944 * deleted, we have to wait until the filesystem completes its 1945 * deletion before reporting that it isn't found. This function waits 1946 * until the deletion _might_ have completed. Callers are responsible 1947 * to recheck inode state. 1948 * 1949 * It doesn't matter if I_NEW is not set initially, a call to 1950 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list 1951 * will DTRT. 1952 */ 1953 static void __wait_on_freeing_inode(struct inode *inode) 1954 { 1955 wait_queue_head_t *wq; 1956 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); 1957 wq = bit_waitqueue(&inode->i_state, __I_NEW); 1958 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 1959 spin_unlock(&inode->i_lock); 1960 spin_unlock(&inode_hash_lock); 1961 schedule(); 1962 finish_wait(wq, &wait.wq_entry); 1963 spin_lock(&inode_hash_lock); 1964 } 1965 1966 static __initdata unsigned long ihash_entries; 1967 static int __init set_ihash_entries(char *str) 1968 { 1969 if (!str) 1970 return 0; 1971 ihash_entries = simple_strtoul(str, &str, 0); 1972 return 1; 1973 } 1974 __setup("ihash_entries=", set_ihash_entries); 1975 1976 /* 1977 * Initialize the waitqueues and inode hash table. 1978 */ 1979 void __init inode_init_early(void) 1980 { 1981 /* If hashes are distributed across NUMA nodes, defer 1982 * hash allocation until vmalloc space is available. 1983 */ 1984 if (hashdist) 1985 return; 1986 1987 inode_hashtable = 1988 alloc_large_system_hash("Inode-cache", 1989 sizeof(struct hlist_head), 1990 ihash_entries, 1991 14, 1992 HASH_EARLY | HASH_ZERO, 1993 &i_hash_shift, 1994 &i_hash_mask, 1995 0, 1996 0); 1997 } 1998 1999 void __init inode_init(void) 2000 { 2001 /* inode slab cache */ 2002 inode_cachep = kmem_cache_create("inode_cache", 2003 sizeof(struct inode), 2004 0, 2005 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 2006 SLAB_MEM_SPREAD|SLAB_ACCOUNT), 2007 init_once); 2008 2009 /* Hash may have been set up in inode_init_early */ 2010 if (!hashdist) 2011 return; 2012 2013 inode_hashtable = 2014 alloc_large_system_hash("Inode-cache", 2015 sizeof(struct hlist_head), 2016 ihash_entries, 2017 14, 2018 HASH_ZERO, 2019 &i_hash_shift, 2020 &i_hash_mask, 2021 0, 2022 0); 2023 } 2024 2025 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) 2026 { 2027 inode->i_mode = mode; 2028 if (S_ISCHR(mode)) { 2029 inode->i_fop = &def_chr_fops; 2030 inode->i_rdev = rdev; 2031 } else if (S_ISBLK(mode)) { 2032 inode->i_fop = &def_blk_fops; 2033 inode->i_rdev = rdev; 2034 } else if (S_ISFIFO(mode)) 2035 inode->i_fop = &pipefifo_fops; 2036 else if (S_ISSOCK(mode)) 2037 ; /* leave it no_open_fops */ 2038 else 2039 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for" 2040 " inode %s:%lu\n", mode, inode->i_sb->s_id, 2041 inode->i_ino); 2042 } 2043 EXPORT_SYMBOL(init_special_inode); 2044 2045 /** 2046 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards 2047 * @inode: New inode 2048 * @dir: Directory inode 2049 * @mode: mode of the new inode 2050 */ 2051 void inode_init_owner(struct inode *inode, const struct inode *dir, 2052 umode_t mode) 2053 { 2054 inode->i_uid = current_fsuid(); 2055 if (dir && dir->i_mode & S_ISGID) { 2056 inode->i_gid = dir->i_gid; 2057 2058 /* Directories are special, and always inherit S_ISGID */ 2059 if (S_ISDIR(mode)) 2060 mode |= S_ISGID; 2061 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && 2062 !in_group_p(inode->i_gid) && 2063 !capable_wrt_inode_uidgid(dir, CAP_FSETID)) 2064 mode &= ~S_ISGID; 2065 } else 2066 inode->i_gid = current_fsgid(); 2067 inode->i_mode = mode; 2068 } 2069 EXPORT_SYMBOL(inode_init_owner); 2070 2071 /** 2072 * inode_owner_or_capable - check current task permissions to inode 2073 * @inode: inode being checked 2074 * 2075 * Return true if current either has CAP_FOWNER in a namespace with the 2076 * inode owner uid mapped, or owns the file. 2077 */ 2078 bool inode_owner_or_capable(const struct inode *inode) 2079 { 2080 struct user_namespace *ns; 2081 2082 if (uid_eq(current_fsuid(), inode->i_uid)) 2083 return true; 2084 2085 ns = current_user_ns(); 2086 if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER)) 2087 return true; 2088 return false; 2089 } 2090 EXPORT_SYMBOL(inode_owner_or_capable); 2091 2092 /* 2093 * Direct i/o helper functions 2094 */ 2095 static void __inode_dio_wait(struct inode *inode) 2096 { 2097 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); 2098 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP); 2099 2100 do { 2101 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE); 2102 if (atomic_read(&inode->i_dio_count)) 2103 schedule(); 2104 } while (atomic_read(&inode->i_dio_count)); 2105 finish_wait(wq, &q.wq_entry); 2106 } 2107 2108 /** 2109 * inode_dio_wait - wait for outstanding DIO requests to finish 2110 * @inode: inode to wait for 2111 * 2112 * Waits for all pending direct I/O requests to finish so that we can 2113 * proceed with a truncate or equivalent operation. 2114 * 2115 * Must be called under a lock that serializes taking new references 2116 * to i_dio_count, usually by inode->i_mutex. 2117 */ 2118 void inode_dio_wait(struct inode *inode) 2119 { 2120 if (atomic_read(&inode->i_dio_count)) 2121 __inode_dio_wait(inode); 2122 } 2123 EXPORT_SYMBOL(inode_dio_wait); 2124 2125 /* 2126 * inode_set_flags - atomically set some inode flags 2127 * 2128 * Note: the caller should be holding i_mutex, or else be sure that 2129 * they have exclusive access to the inode structure (i.e., while the 2130 * inode is being instantiated). The reason for the cmpxchg() loop 2131 * --- which wouldn't be necessary if all code paths which modify 2132 * i_flags actually followed this rule, is that there is at least one 2133 * code path which doesn't today so we use cmpxchg() out of an abundance 2134 * of caution. 2135 * 2136 * In the long run, i_mutex is overkill, and we should probably look 2137 * at using the i_lock spinlock to protect i_flags, and then make sure 2138 * it is so documented in include/linux/fs.h and that all code follows 2139 * the locking convention!! 2140 */ 2141 void inode_set_flags(struct inode *inode, unsigned int flags, 2142 unsigned int mask) 2143 { 2144 WARN_ON_ONCE(flags & ~mask); 2145 set_mask_bits(&inode->i_flags, mask, flags); 2146 } 2147 EXPORT_SYMBOL(inode_set_flags); 2148 2149 void inode_nohighmem(struct inode *inode) 2150 { 2151 mapping_set_gfp_mask(inode->i_mapping, GFP_USER); 2152 } 2153 EXPORT_SYMBOL(inode_nohighmem); 2154 2155 /** 2156 * timespec64_trunc - Truncate timespec64 to a granularity 2157 * @t: Timespec64 2158 * @gran: Granularity in ns. 2159 * 2160 * Truncate a timespec64 to a granularity. Always rounds down. gran must 2161 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). 2162 */ 2163 struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran) 2164 { 2165 /* Avoid division in the common cases 1 ns and 1 s. */ 2166 if (gran == 1) { 2167 /* nothing */ 2168 } else if (gran == NSEC_PER_SEC) { 2169 t.tv_nsec = 0; 2170 } else if (gran > 1 && gran < NSEC_PER_SEC) { 2171 t.tv_nsec -= t.tv_nsec % gran; 2172 } else { 2173 WARN(1, "illegal file time granularity: %u", gran); 2174 } 2175 return t; 2176 } 2177 EXPORT_SYMBOL(timespec64_trunc); 2178 2179 /** 2180 * timestamp_truncate - Truncate timespec to a granularity 2181 * @t: Timespec 2182 * @inode: inode being updated 2183 * 2184 * Truncate a timespec to the granularity supported by the fs 2185 * containing the inode. Always rounds down. gran must 2186 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). 2187 */ 2188 struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode) 2189 { 2190 struct super_block *sb = inode->i_sb; 2191 unsigned int gran = sb->s_time_gran; 2192 2193 t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max); 2194 if (unlikely(t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min)) 2195 t.tv_nsec = 0; 2196 2197 /* Avoid division in the common cases 1 ns and 1 s. */ 2198 if (gran == 1) 2199 ; /* nothing */ 2200 else if (gran == NSEC_PER_SEC) 2201 t.tv_nsec = 0; 2202 else if (gran > 1 && gran < NSEC_PER_SEC) 2203 t.tv_nsec -= t.tv_nsec % gran; 2204 else 2205 WARN(1, "invalid file time granularity: %u", gran); 2206 return t; 2207 } 2208 EXPORT_SYMBOL(timestamp_truncate); 2209 2210 /** 2211 * current_time - Return FS time 2212 * @inode: inode. 2213 * 2214 * Return the current time truncated to the time granularity supported by 2215 * the fs. 2216 * 2217 * Note that inode and inode->sb cannot be NULL. 2218 * Otherwise, the function warns and returns time without truncation. 2219 */ 2220 struct timespec64 current_time(struct inode *inode) 2221 { 2222 struct timespec64 now; 2223 2224 ktime_get_coarse_real_ts64(&now); 2225 2226 if (unlikely(!inode->i_sb)) { 2227 WARN(1, "current_time() called with uninitialized super_block in the inode"); 2228 return now; 2229 } 2230 2231 return timestamp_truncate(now, inode); 2232 } 2233 EXPORT_SYMBOL(current_time); 2234 2235 /* 2236 * Generic function to check FS_IOC_SETFLAGS values and reject any invalid 2237 * configurations. 2238 * 2239 * Note: the caller should be holding i_mutex, or else be sure that they have 2240 * exclusive access to the inode structure. 2241 */ 2242 int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, 2243 unsigned int flags) 2244 { 2245 /* 2246 * The IMMUTABLE and APPEND_ONLY flags can only be changed by 2247 * the relevant capability. 2248 * 2249 * This test looks nicer. Thanks to Pauline Middelink 2250 */ 2251 if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) && 2252 !capable(CAP_LINUX_IMMUTABLE)) 2253 return -EPERM; 2254 2255 return 0; 2256 } 2257 EXPORT_SYMBOL(vfs_ioc_setflags_prepare); 2258 2259 /* 2260 * Generic function to check FS_IOC_FSSETXATTR values and reject any invalid 2261 * configurations. 2262 * 2263 * Note: the caller should be holding i_mutex, or else be sure that they have 2264 * exclusive access to the inode structure. 2265 */ 2266 int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa, 2267 struct fsxattr *fa) 2268 { 2269 /* 2270 * Can't modify an immutable/append-only file unless we have 2271 * appropriate permission. 2272 */ 2273 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) & 2274 (FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND) && 2275 !capable(CAP_LINUX_IMMUTABLE)) 2276 return -EPERM; 2277 2278 /* 2279 * Project Quota ID state is only allowed to change from within the init 2280 * namespace. Enforce that restriction only if we are trying to change 2281 * the quota ID state. Everything else is allowed in user namespaces. 2282 */ 2283 if (current_user_ns() != &init_user_ns) { 2284 if (old_fa->fsx_projid != fa->fsx_projid) 2285 return -EINVAL; 2286 if ((old_fa->fsx_xflags ^ fa->fsx_xflags) & 2287 FS_XFLAG_PROJINHERIT) 2288 return -EINVAL; 2289 } 2290 2291 /* Check extent size hints. */ 2292 if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode)) 2293 return -EINVAL; 2294 2295 if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) && 2296 !S_ISDIR(inode->i_mode)) 2297 return -EINVAL; 2298 2299 if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) && 2300 !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) 2301 return -EINVAL; 2302 2303 /* 2304 * It is only valid to set the DAX flag on regular files and 2305 * directories on filesystems. 2306 */ 2307 if ((fa->fsx_xflags & FS_XFLAG_DAX) && 2308 !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) 2309 return -EINVAL; 2310 2311 /* Extent size hints of zero turn off the flags. */ 2312 if (fa->fsx_extsize == 0) 2313 fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT); 2314 if (fa->fsx_cowextsize == 0) 2315 fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; 2316 2317 return 0; 2318 } 2319 EXPORT_SYMBOL(vfs_ioc_fssetxattr_check); 2320