1 /* 2 * (C) 1997 Linus Torvalds 3 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation) 4 */ 5 #include <linux/export.h> 6 #include <linux/fs.h> 7 #include <linux/mm.h> 8 #include <linux/backing-dev.h> 9 #include <linux/hash.h> 10 #include <linux/swap.h> 11 #include <linux/security.h> 12 #include <linux/cdev.h> 13 #include <linux/bootmem.h> 14 #include <linux/fsnotify.h> 15 #include <linux/mount.h> 16 #include <linux/posix_acl.h> 17 #include <linux/prefetch.h> 18 #include <linux/buffer_head.h> /* for inode_has_buffers */ 19 #include <linux/ratelimit.h> 20 #include <linux/list_lru.h> 21 #include <linux/iversion.h> 22 #include <trace/events/writeback.h> 23 #include "internal.h" 24 25 /* 26 * Inode locking rules: 27 * 28 * inode->i_lock protects: 29 * inode->i_state, inode->i_hash, __iget() 30 * Inode LRU list locks protect: 31 * inode->i_sb->s_inode_lru, inode->i_lru 32 * inode->i_sb->s_inode_list_lock protects: 33 * inode->i_sb->s_inodes, inode->i_sb_list 34 * bdi->wb.list_lock protects: 35 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list 36 * inode_hash_lock protects: 37 * inode_hashtable, inode->i_hash 38 * 39 * Lock ordering: 40 * 41 * inode->i_sb->s_inode_list_lock 42 * inode->i_lock 43 * Inode LRU list locks 44 * 45 * bdi->wb.list_lock 46 * inode->i_lock 47 * 48 * inode_hash_lock 49 * inode->i_sb->s_inode_list_lock 50 * inode->i_lock 51 * 52 * iunique_lock 53 * inode_hash_lock 54 */ 55 56 static unsigned int i_hash_mask __read_mostly; 57 static unsigned int i_hash_shift __read_mostly; 58 static struct hlist_head *inode_hashtable __read_mostly; 59 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); 60 61 /* 62 * Empty aops. Can be used for the cases where the user does not 63 * define any of the address_space operations. 64 */ 65 const struct address_space_operations empty_aops = { 66 }; 67 EXPORT_SYMBOL(empty_aops); 68 69 /* 70 * Statistics gathering.. 71 */ 72 struct inodes_stat_t inodes_stat; 73 74 static DEFINE_PER_CPU(unsigned long, nr_inodes); 75 static DEFINE_PER_CPU(unsigned long, nr_unused); 76 77 static struct kmem_cache *inode_cachep __read_mostly; 78 79 static long get_nr_inodes(void) 80 { 81 int i; 82 long sum = 0; 83 for_each_possible_cpu(i) 84 sum += per_cpu(nr_inodes, i); 85 return sum < 0 ? 0 : sum; 86 } 87 88 static inline long get_nr_inodes_unused(void) 89 { 90 int i; 91 long sum = 0; 92 for_each_possible_cpu(i) 93 sum += per_cpu(nr_unused, i); 94 return sum < 0 ? 0 : sum; 95 } 96 97 long get_nr_dirty_inodes(void) 98 { 99 /* not actually dirty inodes, but a wild approximation */ 100 long nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); 101 return nr_dirty > 0 ? nr_dirty : 0; 102 } 103 104 /* 105 * Handle nr_inode sysctl 106 */ 107 #ifdef CONFIG_SYSCTL 108 int proc_nr_inodes(struct ctl_table *table, int write, 109 void __user *buffer, size_t *lenp, loff_t *ppos) 110 { 111 inodes_stat.nr_inodes = get_nr_inodes(); 112 inodes_stat.nr_unused = get_nr_inodes_unused(); 113 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 114 } 115 #endif 116 117 static int no_open(struct inode *inode, struct file *file) 118 { 119 return -ENXIO; 120 } 121 122 /** 123 * inode_init_always - perform inode structure initialisation 124 * @sb: superblock inode belongs to 125 * @inode: inode to initialise 126 * 127 * These are initializations that need to be done on every inode 128 * allocation as the fields are not initialised by slab allocation. 129 */ 130 int inode_init_always(struct super_block *sb, struct inode *inode) 131 { 132 static const struct inode_operations empty_iops; 133 static const struct file_operations no_open_fops = {.open = no_open}; 134 struct address_space *const mapping = &inode->i_data; 135 136 inode->i_sb = sb; 137 inode->i_blkbits = sb->s_blocksize_bits; 138 inode->i_flags = 0; 139 atomic_set(&inode->i_count, 1); 140 inode->i_op = &empty_iops; 141 inode->i_fop = &no_open_fops; 142 inode->__i_nlink = 1; 143 inode->i_opflags = 0; 144 if (sb->s_xattr) 145 inode->i_opflags |= IOP_XATTR; 146 i_uid_write(inode, 0); 147 i_gid_write(inode, 0); 148 atomic_set(&inode->i_writecount, 0); 149 inode->i_size = 0; 150 inode->i_write_hint = WRITE_LIFE_NOT_SET; 151 inode->i_blocks = 0; 152 inode->i_bytes = 0; 153 inode->i_generation = 0; 154 inode->i_pipe = NULL; 155 inode->i_bdev = NULL; 156 inode->i_cdev = NULL; 157 inode->i_link = NULL; 158 inode->i_dir_seq = 0; 159 inode->i_rdev = 0; 160 inode->dirtied_when = 0; 161 162 #ifdef CONFIG_CGROUP_WRITEBACK 163 inode->i_wb_frn_winner = 0; 164 inode->i_wb_frn_avg_time = 0; 165 inode->i_wb_frn_history = 0; 166 #endif 167 168 if (security_inode_alloc(inode)) 169 goto out; 170 spin_lock_init(&inode->i_lock); 171 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); 172 173 init_rwsem(&inode->i_rwsem); 174 lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); 175 176 atomic_set(&inode->i_dio_count, 0); 177 178 mapping->a_ops = &empty_aops; 179 mapping->host = inode; 180 mapping->flags = 0; 181 mapping->wb_err = 0; 182 atomic_set(&mapping->i_mmap_writable, 0); 183 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 184 mapping->private_data = NULL; 185 mapping->writeback_index = 0; 186 inode->i_private = NULL; 187 inode->i_mapping = mapping; 188 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ 189 #ifdef CONFIG_FS_POSIX_ACL 190 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; 191 #endif 192 193 #ifdef CONFIG_FSNOTIFY 194 inode->i_fsnotify_mask = 0; 195 #endif 196 inode->i_flctx = NULL; 197 this_cpu_inc(nr_inodes); 198 199 return 0; 200 out: 201 return -ENOMEM; 202 } 203 EXPORT_SYMBOL(inode_init_always); 204 205 static struct inode *alloc_inode(struct super_block *sb) 206 { 207 struct inode *inode; 208 209 if (sb->s_op->alloc_inode) 210 inode = sb->s_op->alloc_inode(sb); 211 else 212 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); 213 214 if (!inode) 215 return NULL; 216 217 if (unlikely(inode_init_always(sb, inode))) { 218 if (inode->i_sb->s_op->destroy_inode) 219 inode->i_sb->s_op->destroy_inode(inode); 220 else 221 kmem_cache_free(inode_cachep, inode); 222 return NULL; 223 } 224 225 return inode; 226 } 227 228 void free_inode_nonrcu(struct inode *inode) 229 { 230 kmem_cache_free(inode_cachep, inode); 231 } 232 EXPORT_SYMBOL(free_inode_nonrcu); 233 234 void __destroy_inode(struct inode *inode) 235 { 236 BUG_ON(inode_has_buffers(inode)); 237 inode_detach_wb(inode); 238 security_inode_free(inode); 239 fsnotify_inode_delete(inode); 240 locks_free_lock_context(inode); 241 if (!inode->i_nlink) { 242 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); 243 atomic_long_dec(&inode->i_sb->s_remove_count); 244 } 245 246 #ifdef CONFIG_FS_POSIX_ACL 247 if (inode->i_acl && !is_uncached_acl(inode->i_acl)) 248 posix_acl_release(inode->i_acl); 249 if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl)) 250 posix_acl_release(inode->i_default_acl); 251 #endif 252 this_cpu_dec(nr_inodes); 253 } 254 EXPORT_SYMBOL(__destroy_inode); 255 256 static void i_callback(struct rcu_head *head) 257 { 258 struct inode *inode = container_of(head, struct inode, i_rcu); 259 kmem_cache_free(inode_cachep, inode); 260 } 261 262 static void destroy_inode(struct inode *inode) 263 { 264 BUG_ON(!list_empty(&inode->i_lru)); 265 __destroy_inode(inode); 266 if (inode->i_sb->s_op->destroy_inode) 267 inode->i_sb->s_op->destroy_inode(inode); 268 else 269 call_rcu(&inode->i_rcu, i_callback); 270 } 271 272 /** 273 * drop_nlink - directly drop an inode's link count 274 * @inode: inode 275 * 276 * This is a low-level filesystem helper to replace any 277 * direct filesystem manipulation of i_nlink. In cases 278 * where we are attempting to track writes to the 279 * filesystem, a decrement to zero means an imminent 280 * write when the file is truncated and actually unlinked 281 * on the filesystem. 282 */ 283 void drop_nlink(struct inode *inode) 284 { 285 WARN_ON(inode->i_nlink == 0); 286 inode->__i_nlink--; 287 if (!inode->i_nlink) 288 atomic_long_inc(&inode->i_sb->s_remove_count); 289 } 290 EXPORT_SYMBOL(drop_nlink); 291 292 /** 293 * clear_nlink - directly zero an inode's link count 294 * @inode: inode 295 * 296 * This is a low-level filesystem helper to replace any 297 * direct filesystem manipulation of i_nlink. See 298 * drop_nlink() for why we care about i_nlink hitting zero. 299 */ 300 void clear_nlink(struct inode *inode) 301 { 302 if (inode->i_nlink) { 303 inode->__i_nlink = 0; 304 atomic_long_inc(&inode->i_sb->s_remove_count); 305 } 306 } 307 EXPORT_SYMBOL(clear_nlink); 308 309 /** 310 * set_nlink - directly set an inode's link count 311 * @inode: inode 312 * @nlink: new nlink (should be non-zero) 313 * 314 * This is a low-level filesystem helper to replace any 315 * direct filesystem manipulation of i_nlink. 316 */ 317 void set_nlink(struct inode *inode, unsigned int nlink) 318 { 319 if (!nlink) { 320 clear_nlink(inode); 321 } else { 322 /* Yes, some filesystems do change nlink from zero to one */ 323 if (inode->i_nlink == 0) 324 atomic_long_dec(&inode->i_sb->s_remove_count); 325 326 inode->__i_nlink = nlink; 327 } 328 } 329 EXPORT_SYMBOL(set_nlink); 330 331 /** 332 * inc_nlink - directly increment an inode's link count 333 * @inode: inode 334 * 335 * This is a low-level filesystem helper to replace any 336 * direct filesystem manipulation of i_nlink. Currently, 337 * it is only here for parity with dec_nlink(). 338 */ 339 void inc_nlink(struct inode *inode) 340 { 341 if (unlikely(inode->i_nlink == 0)) { 342 WARN_ON(!(inode->i_state & I_LINKABLE)); 343 atomic_long_dec(&inode->i_sb->s_remove_count); 344 } 345 346 inode->__i_nlink++; 347 } 348 EXPORT_SYMBOL(inc_nlink); 349 350 static void __address_space_init_once(struct address_space *mapping) 351 { 352 INIT_RADIX_TREE(&mapping->i_pages, GFP_ATOMIC | __GFP_ACCOUNT); 353 init_rwsem(&mapping->i_mmap_rwsem); 354 INIT_LIST_HEAD(&mapping->private_list); 355 spin_lock_init(&mapping->private_lock); 356 mapping->i_mmap = RB_ROOT_CACHED; 357 } 358 359 void address_space_init_once(struct address_space *mapping) 360 { 361 memset(mapping, 0, sizeof(*mapping)); 362 __address_space_init_once(mapping); 363 } 364 EXPORT_SYMBOL(address_space_init_once); 365 366 /* 367 * These are initializations that only need to be done 368 * once, because the fields are idempotent across use 369 * of the inode, so let the slab aware of that. 370 */ 371 void inode_init_once(struct inode *inode) 372 { 373 memset(inode, 0, sizeof(*inode)); 374 INIT_HLIST_NODE(&inode->i_hash); 375 INIT_LIST_HEAD(&inode->i_devices); 376 INIT_LIST_HEAD(&inode->i_io_list); 377 INIT_LIST_HEAD(&inode->i_wb_list); 378 INIT_LIST_HEAD(&inode->i_lru); 379 __address_space_init_once(&inode->i_data); 380 i_size_ordered_init(inode); 381 } 382 EXPORT_SYMBOL(inode_init_once); 383 384 static void init_once(void *foo) 385 { 386 struct inode *inode = (struct inode *) foo; 387 388 inode_init_once(inode); 389 } 390 391 /* 392 * inode->i_lock must be held 393 */ 394 void __iget(struct inode *inode) 395 { 396 atomic_inc(&inode->i_count); 397 } 398 399 /* 400 * get additional reference to inode; caller must already hold one. 401 */ 402 void ihold(struct inode *inode) 403 { 404 WARN_ON(atomic_inc_return(&inode->i_count) < 2); 405 } 406 EXPORT_SYMBOL(ihold); 407 408 static void inode_lru_list_add(struct inode *inode) 409 { 410 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) 411 this_cpu_inc(nr_unused); 412 else 413 inode->i_state |= I_REFERENCED; 414 } 415 416 /* 417 * Add inode to LRU if needed (inode is unused and clean). 418 * 419 * Needs inode->i_lock held. 420 */ 421 void inode_add_lru(struct inode *inode) 422 { 423 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | 424 I_FREEING | I_WILL_FREE)) && 425 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE) 426 inode_lru_list_add(inode); 427 } 428 429 430 static void inode_lru_list_del(struct inode *inode) 431 { 432 433 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) 434 this_cpu_dec(nr_unused); 435 } 436 437 /** 438 * inode_sb_list_add - add inode to the superblock list of inodes 439 * @inode: inode to add 440 */ 441 void inode_sb_list_add(struct inode *inode) 442 { 443 spin_lock(&inode->i_sb->s_inode_list_lock); 444 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); 445 spin_unlock(&inode->i_sb->s_inode_list_lock); 446 } 447 EXPORT_SYMBOL_GPL(inode_sb_list_add); 448 449 static inline void inode_sb_list_del(struct inode *inode) 450 { 451 if (!list_empty(&inode->i_sb_list)) { 452 spin_lock(&inode->i_sb->s_inode_list_lock); 453 list_del_init(&inode->i_sb_list); 454 spin_unlock(&inode->i_sb->s_inode_list_lock); 455 } 456 } 457 458 static unsigned long hash(struct super_block *sb, unsigned long hashval) 459 { 460 unsigned long tmp; 461 462 tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / 463 L1_CACHE_BYTES; 464 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift); 465 return tmp & i_hash_mask; 466 } 467 468 /** 469 * __insert_inode_hash - hash an inode 470 * @inode: unhashed inode 471 * @hashval: unsigned long value used to locate this object in the 472 * inode_hashtable. 473 * 474 * Add an inode to the inode hash for this superblock. 475 */ 476 void __insert_inode_hash(struct inode *inode, unsigned long hashval) 477 { 478 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval); 479 480 spin_lock(&inode_hash_lock); 481 spin_lock(&inode->i_lock); 482 hlist_add_head(&inode->i_hash, b); 483 spin_unlock(&inode->i_lock); 484 spin_unlock(&inode_hash_lock); 485 } 486 EXPORT_SYMBOL(__insert_inode_hash); 487 488 /** 489 * __remove_inode_hash - remove an inode from the hash 490 * @inode: inode to unhash 491 * 492 * Remove an inode from the superblock. 493 */ 494 void __remove_inode_hash(struct inode *inode) 495 { 496 spin_lock(&inode_hash_lock); 497 spin_lock(&inode->i_lock); 498 hlist_del_init(&inode->i_hash); 499 spin_unlock(&inode->i_lock); 500 spin_unlock(&inode_hash_lock); 501 } 502 EXPORT_SYMBOL(__remove_inode_hash); 503 504 void clear_inode(struct inode *inode) 505 { 506 /* 507 * We have to cycle the i_pages lock here because reclaim can be in the 508 * process of removing the last page (in __delete_from_page_cache()) 509 * and we must not free the mapping under it. 510 */ 511 xa_lock_irq(&inode->i_data.i_pages); 512 BUG_ON(inode->i_data.nrpages); 513 BUG_ON(inode->i_data.nrexceptional); 514 xa_unlock_irq(&inode->i_data.i_pages); 515 BUG_ON(!list_empty(&inode->i_data.private_list)); 516 BUG_ON(!(inode->i_state & I_FREEING)); 517 BUG_ON(inode->i_state & I_CLEAR); 518 BUG_ON(!list_empty(&inode->i_wb_list)); 519 /* don't need i_lock here, no concurrent mods to i_state */ 520 inode->i_state = I_FREEING | I_CLEAR; 521 } 522 EXPORT_SYMBOL(clear_inode); 523 524 /* 525 * Free the inode passed in, removing it from the lists it is still connected 526 * to. We remove any pages still attached to the inode and wait for any IO that 527 * is still in progress before finally destroying the inode. 528 * 529 * An inode must already be marked I_FREEING so that we avoid the inode being 530 * moved back onto lists if we race with other code that manipulates the lists 531 * (e.g. writeback_single_inode). The caller is responsible for setting this. 532 * 533 * An inode must already be removed from the LRU list before being evicted from 534 * the cache. This should occur atomically with setting the I_FREEING state 535 * flag, so no inodes here should ever be on the LRU when being evicted. 536 */ 537 static void evict(struct inode *inode) 538 { 539 const struct super_operations *op = inode->i_sb->s_op; 540 541 BUG_ON(!(inode->i_state & I_FREEING)); 542 BUG_ON(!list_empty(&inode->i_lru)); 543 544 if (!list_empty(&inode->i_io_list)) 545 inode_io_list_del(inode); 546 547 inode_sb_list_del(inode); 548 549 /* 550 * Wait for flusher thread to be done with the inode so that filesystem 551 * does not start destroying it while writeback is still running. Since 552 * the inode has I_FREEING set, flusher thread won't start new work on 553 * the inode. We just have to wait for running writeback to finish. 554 */ 555 inode_wait_for_writeback(inode); 556 557 if (op->evict_inode) { 558 op->evict_inode(inode); 559 } else { 560 truncate_inode_pages_final(&inode->i_data); 561 clear_inode(inode); 562 } 563 if (S_ISBLK(inode->i_mode) && inode->i_bdev) 564 bd_forget(inode); 565 if (S_ISCHR(inode->i_mode) && inode->i_cdev) 566 cd_forget(inode); 567 568 remove_inode_hash(inode); 569 570 spin_lock(&inode->i_lock); 571 wake_up_bit(&inode->i_state, __I_NEW); 572 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); 573 spin_unlock(&inode->i_lock); 574 575 destroy_inode(inode); 576 } 577 578 /* 579 * dispose_list - dispose of the contents of a local list 580 * @head: the head of the list to free 581 * 582 * Dispose-list gets a local list with local inodes in it, so it doesn't 583 * need to worry about list corruption and SMP locks. 584 */ 585 static void dispose_list(struct list_head *head) 586 { 587 while (!list_empty(head)) { 588 struct inode *inode; 589 590 inode = list_first_entry(head, struct inode, i_lru); 591 list_del_init(&inode->i_lru); 592 593 evict(inode); 594 cond_resched(); 595 } 596 } 597 598 /** 599 * evict_inodes - evict all evictable inodes for a superblock 600 * @sb: superblock to operate on 601 * 602 * Make sure that no inodes with zero refcount are retained. This is 603 * called by superblock shutdown after having SB_ACTIVE flag removed, 604 * so any inode reaching zero refcount during or after that call will 605 * be immediately evicted. 606 */ 607 void evict_inodes(struct super_block *sb) 608 { 609 struct inode *inode, *next; 610 LIST_HEAD(dispose); 611 612 again: 613 spin_lock(&sb->s_inode_list_lock); 614 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 615 if (atomic_read(&inode->i_count)) 616 continue; 617 618 spin_lock(&inode->i_lock); 619 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 620 spin_unlock(&inode->i_lock); 621 continue; 622 } 623 624 inode->i_state |= I_FREEING; 625 inode_lru_list_del(inode); 626 spin_unlock(&inode->i_lock); 627 list_add(&inode->i_lru, &dispose); 628 629 /* 630 * We can have a ton of inodes to evict at unmount time given 631 * enough memory, check to see if we need to go to sleep for a 632 * bit so we don't livelock. 633 */ 634 if (need_resched()) { 635 spin_unlock(&sb->s_inode_list_lock); 636 cond_resched(); 637 dispose_list(&dispose); 638 goto again; 639 } 640 } 641 spin_unlock(&sb->s_inode_list_lock); 642 643 dispose_list(&dispose); 644 } 645 EXPORT_SYMBOL_GPL(evict_inodes); 646 647 /** 648 * invalidate_inodes - attempt to free all inodes on a superblock 649 * @sb: superblock to operate on 650 * @kill_dirty: flag to guide handling of dirty inodes 651 * 652 * Attempts to free all inodes for a given superblock. If there were any 653 * busy inodes return a non-zero value, else zero. 654 * If @kill_dirty is set, discard dirty inodes too, otherwise treat 655 * them as busy. 656 */ 657 int invalidate_inodes(struct super_block *sb, bool kill_dirty) 658 { 659 int busy = 0; 660 struct inode *inode, *next; 661 LIST_HEAD(dispose); 662 663 spin_lock(&sb->s_inode_list_lock); 664 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 665 spin_lock(&inode->i_lock); 666 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { 667 spin_unlock(&inode->i_lock); 668 continue; 669 } 670 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) { 671 spin_unlock(&inode->i_lock); 672 busy = 1; 673 continue; 674 } 675 if (atomic_read(&inode->i_count)) { 676 spin_unlock(&inode->i_lock); 677 busy = 1; 678 continue; 679 } 680 681 inode->i_state |= I_FREEING; 682 inode_lru_list_del(inode); 683 spin_unlock(&inode->i_lock); 684 list_add(&inode->i_lru, &dispose); 685 } 686 spin_unlock(&sb->s_inode_list_lock); 687 688 dispose_list(&dispose); 689 690 return busy; 691 } 692 693 /* 694 * Isolate the inode from the LRU in preparation for freeing it. 695 * 696 * Any inodes which are pinned purely because of attached pagecache have their 697 * pagecache removed. If the inode has metadata buffers attached to 698 * mapping->private_list then try to remove them. 699 * 700 * If the inode has the I_REFERENCED flag set, then it means that it has been 701 * used recently - the flag is set in iput_final(). When we encounter such an 702 * inode, clear the flag and move it to the back of the LRU so it gets another 703 * pass through the LRU before it gets reclaimed. This is necessary because of 704 * the fact we are doing lazy LRU updates to minimise lock contention so the 705 * LRU does not have strict ordering. Hence we don't want to reclaim inodes 706 * with this flag set because they are the inodes that are out of order. 707 */ 708 static enum lru_status inode_lru_isolate(struct list_head *item, 709 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 710 { 711 struct list_head *freeable = arg; 712 struct inode *inode = container_of(item, struct inode, i_lru); 713 714 /* 715 * we are inverting the lru lock/inode->i_lock here, so use a trylock. 716 * If we fail to get the lock, just skip it. 717 */ 718 if (!spin_trylock(&inode->i_lock)) 719 return LRU_SKIP; 720 721 /* 722 * Referenced or dirty inodes are still in use. Give them another pass 723 * through the LRU as we canot reclaim them now. 724 */ 725 if (atomic_read(&inode->i_count) || 726 (inode->i_state & ~I_REFERENCED)) { 727 list_lru_isolate(lru, &inode->i_lru); 728 spin_unlock(&inode->i_lock); 729 this_cpu_dec(nr_unused); 730 return LRU_REMOVED; 731 } 732 733 /* recently referenced inodes get one more pass */ 734 if (inode->i_state & I_REFERENCED) { 735 inode->i_state &= ~I_REFERENCED; 736 spin_unlock(&inode->i_lock); 737 return LRU_ROTATE; 738 } 739 740 if (inode_has_buffers(inode) || inode->i_data.nrpages) { 741 __iget(inode); 742 spin_unlock(&inode->i_lock); 743 spin_unlock(lru_lock); 744 if (remove_inode_buffers(inode)) { 745 unsigned long reap; 746 reap = invalidate_mapping_pages(&inode->i_data, 0, -1); 747 if (current_is_kswapd()) 748 __count_vm_events(KSWAPD_INODESTEAL, reap); 749 else 750 __count_vm_events(PGINODESTEAL, reap); 751 if (current->reclaim_state) 752 current->reclaim_state->reclaimed_slab += reap; 753 } 754 iput(inode); 755 spin_lock(lru_lock); 756 return LRU_RETRY; 757 } 758 759 WARN_ON(inode->i_state & I_NEW); 760 inode->i_state |= I_FREEING; 761 list_lru_isolate_move(lru, &inode->i_lru, freeable); 762 spin_unlock(&inode->i_lock); 763 764 this_cpu_dec(nr_unused); 765 return LRU_REMOVED; 766 } 767 768 /* 769 * Walk the superblock inode LRU for freeable inodes and attempt to free them. 770 * This is called from the superblock shrinker function with a number of inodes 771 * to trim from the LRU. Inodes to be freed are moved to a temporary list and 772 * then are freed outside inode_lock by dispose_list(). 773 */ 774 long prune_icache_sb(struct super_block *sb, struct shrink_control *sc) 775 { 776 LIST_HEAD(freeable); 777 long freed; 778 779 freed = list_lru_shrink_walk(&sb->s_inode_lru, sc, 780 inode_lru_isolate, &freeable); 781 dispose_list(&freeable); 782 return freed; 783 } 784 785 static void __wait_on_freeing_inode(struct inode *inode); 786 /* 787 * Called with the inode lock held. 788 */ 789 static struct inode *find_inode(struct super_block *sb, 790 struct hlist_head *head, 791 int (*test)(struct inode *, void *), 792 void *data) 793 { 794 struct inode *inode = NULL; 795 796 repeat: 797 hlist_for_each_entry(inode, head, i_hash) { 798 if (inode->i_sb != sb) 799 continue; 800 if (!test(inode, data)) 801 continue; 802 spin_lock(&inode->i_lock); 803 if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 804 __wait_on_freeing_inode(inode); 805 goto repeat; 806 } 807 if (unlikely(inode->i_state & I_CREATING)) { 808 spin_unlock(&inode->i_lock); 809 return ERR_PTR(-ESTALE); 810 } 811 __iget(inode); 812 spin_unlock(&inode->i_lock); 813 return inode; 814 } 815 return NULL; 816 } 817 818 /* 819 * find_inode_fast is the fast path version of find_inode, see the comment at 820 * iget_locked for details. 821 */ 822 static struct inode *find_inode_fast(struct super_block *sb, 823 struct hlist_head *head, unsigned long ino) 824 { 825 struct inode *inode = NULL; 826 827 repeat: 828 hlist_for_each_entry(inode, head, i_hash) { 829 if (inode->i_ino != ino) 830 continue; 831 if (inode->i_sb != sb) 832 continue; 833 spin_lock(&inode->i_lock); 834 if (inode->i_state & (I_FREEING|I_WILL_FREE)) { 835 __wait_on_freeing_inode(inode); 836 goto repeat; 837 } 838 if (unlikely(inode->i_state & I_CREATING)) { 839 spin_unlock(&inode->i_lock); 840 return ERR_PTR(-ESTALE); 841 } 842 __iget(inode); 843 spin_unlock(&inode->i_lock); 844 return inode; 845 } 846 return NULL; 847 } 848 849 /* 850 * Each cpu owns a range of LAST_INO_BATCH numbers. 851 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations, 852 * to renew the exhausted range. 853 * 854 * This does not significantly increase overflow rate because every CPU can 855 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is 856 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the 857 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase 858 * overflow rate by 2x, which does not seem too significant. 859 * 860 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 861 * error if st_ino won't fit in target struct field. Use 32bit counter 862 * here to attempt to avoid that. 863 */ 864 #define LAST_INO_BATCH 1024 865 static DEFINE_PER_CPU(unsigned int, last_ino); 866 867 unsigned int get_next_ino(void) 868 { 869 unsigned int *p = &get_cpu_var(last_ino); 870 unsigned int res = *p; 871 872 #ifdef CONFIG_SMP 873 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) { 874 static atomic_t shared_last_ino; 875 int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino); 876 877 res = next - LAST_INO_BATCH; 878 } 879 #endif 880 881 res++; 882 /* get_next_ino should not provide a 0 inode number */ 883 if (unlikely(!res)) 884 res++; 885 *p = res; 886 put_cpu_var(last_ino); 887 return res; 888 } 889 EXPORT_SYMBOL(get_next_ino); 890 891 /** 892 * new_inode_pseudo - obtain an inode 893 * @sb: superblock 894 * 895 * Allocates a new inode for given superblock. 896 * Inode wont be chained in superblock s_inodes list 897 * This means : 898 * - fs can't be unmount 899 * - quotas, fsnotify, writeback can't work 900 */ 901 struct inode *new_inode_pseudo(struct super_block *sb) 902 { 903 struct inode *inode = alloc_inode(sb); 904 905 if (inode) { 906 spin_lock(&inode->i_lock); 907 inode->i_state = 0; 908 spin_unlock(&inode->i_lock); 909 INIT_LIST_HEAD(&inode->i_sb_list); 910 } 911 return inode; 912 } 913 914 /** 915 * new_inode - obtain an inode 916 * @sb: superblock 917 * 918 * Allocates a new inode for given superblock. The default gfp_mask 919 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE. 920 * If HIGHMEM pages are unsuitable or it is known that pages allocated 921 * for the page cache are not reclaimable or migratable, 922 * mapping_set_gfp_mask() must be called with suitable flags on the 923 * newly created inode's mapping 924 * 925 */ 926 struct inode *new_inode(struct super_block *sb) 927 { 928 struct inode *inode; 929 930 spin_lock_prefetch(&sb->s_inode_list_lock); 931 932 inode = new_inode_pseudo(sb); 933 if (inode) 934 inode_sb_list_add(inode); 935 return inode; 936 } 937 EXPORT_SYMBOL(new_inode); 938 939 #ifdef CONFIG_DEBUG_LOCK_ALLOC 940 void lockdep_annotate_inode_mutex_key(struct inode *inode) 941 { 942 if (S_ISDIR(inode->i_mode)) { 943 struct file_system_type *type = inode->i_sb->s_type; 944 945 /* Set new key only if filesystem hasn't already changed it */ 946 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) { 947 /* 948 * ensure nobody is actually holding i_mutex 949 */ 950 // mutex_destroy(&inode->i_mutex); 951 init_rwsem(&inode->i_rwsem); 952 lockdep_set_class(&inode->i_rwsem, 953 &type->i_mutex_dir_key); 954 } 955 } 956 } 957 EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); 958 #endif 959 960 /** 961 * unlock_new_inode - clear the I_NEW state and wake up any waiters 962 * @inode: new inode to unlock 963 * 964 * Called when the inode is fully initialised to clear the new state of the 965 * inode and wake up anyone waiting for the inode to finish initialisation. 966 */ 967 void unlock_new_inode(struct inode *inode) 968 { 969 lockdep_annotate_inode_mutex_key(inode); 970 spin_lock(&inode->i_lock); 971 WARN_ON(!(inode->i_state & I_NEW)); 972 inode->i_state &= ~I_NEW & ~I_CREATING; 973 smp_mb(); 974 wake_up_bit(&inode->i_state, __I_NEW); 975 spin_unlock(&inode->i_lock); 976 } 977 EXPORT_SYMBOL(unlock_new_inode); 978 979 void discard_new_inode(struct inode *inode) 980 { 981 lockdep_annotate_inode_mutex_key(inode); 982 spin_lock(&inode->i_lock); 983 WARN_ON(!(inode->i_state & I_NEW)); 984 inode->i_state &= ~I_NEW; 985 smp_mb(); 986 wake_up_bit(&inode->i_state, __I_NEW); 987 spin_unlock(&inode->i_lock); 988 iput(inode); 989 } 990 EXPORT_SYMBOL(discard_new_inode); 991 992 /** 993 * lock_two_nondirectories - take two i_mutexes on non-directory objects 994 * 995 * Lock any non-NULL argument that is not a directory. 996 * Zero, one or two objects may be locked by this function. 997 * 998 * @inode1: first inode to lock 999 * @inode2: second inode to lock 1000 */ 1001 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2) 1002 { 1003 if (inode1 > inode2) 1004 swap(inode1, inode2); 1005 1006 if (inode1 && !S_ISDIR(inode1->i_mode)) 1007 inode_lock(inode1); 1008 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) 1009 inode_lock_nested(inode2, I_MUTEX_NONDIR2); 1010 } 1011 EXPORT_SYMBOL(lock_two_nondirectories); 1012 1013 /** 1014 * unlock_two_nondirectories - release locks from lock_two_nondirectories() 1015 * @inode1: first inode to unlock 1016 * @inode2: second inode to unlock 1017 */ 1018 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2) 1019 { 1020 if (inode1 && !S_ISDIR(inode1->i_mode)) 1021 inode_unlock(inode1); 1022 if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) 1023 inode_unlock(inode2); 1024 } 1025 EXPORT_SYMBOL(unlock_two_nondirectories); 1026 1027 /** 1028 * inode_insert5 - obtain an inode from a mounted file system 1029 * @inode: pre-allocated inode to use for insert to cache 1030 * @hashval: hash value (usually inode number) to get 1031 * @test: callback used for comparisons between inodes 1032 * @set: callback used to initialize a new struct inode 1033 * @data: opaque data pointer to pass to @test and @set 1034 * 1035 * Search for the inode specified by @hashval and @data in the inode cache, 1036 * and if present it is return it with an increased reference count. This is 1037 * a variant of iget5_locked() for callers that don't want to fail on memory 1038 * allocation of inode. 1039 * 1040 * If the inode is not in cache, insert the pre-allocated inode to cache and 1041 * return it locked, hashed, and with the I_NEW flag set. The file system gets 1042 * to fill it in before unlocking it via unlock_new_inode(). 1043 * 1044 * Note both @test and @set are called with the inode_hash_lock held, so can't 1045 * sleep. 1046 */ 1047 struct inode *inode_insert5(struct inode *inode, unsigned long hashval, 1048 int (*test)(struct inode *, void *), 1049 int (*set)(struct inode *, void *), void *data) 1050 { 1051 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval); 1052 struct inode *old; 1053 bool creating = inode->i_state & I_CREATING; 1054 1055 again: 1056 spin_lock(&inode_hash_lock); 1057 old = find_inode(inode->i_sb, head, test, data); 1058 if (unlikely(old)) { 1059 /* 1060 * Uhhuh, somebody else created the same inode under us. 1061 * Use the old inode instead of the preallocated one. 1062 */ 1063 spin_unlock(&inode_hash_lock); 1064 if (IS_ERR(old)) 1065 return NULL; 1066 wait_on_inode(old); 1067 if (unlikely(inode_unhashed(old))) { 1068 iput(old); 1069 goto again; 1070 } 1071 return old; 1072 } 1073 1074 if (set && unlikely(set(inode, data))) { 1075 inode = NULL; 1076 goto unlock; 1077 } 1078 1079 /* 1080 * Return the locked inode with I_NEW set, the 1081 * caller is responsible for filling in the contents 1082 */ 1083 spin_lock(&inode->i_lock); 1084 inode->i_state |= I_NEW; 1085 hlist_add_head(&inode->i_hash, head); 1086 spin_unlock(&inode->i_lock); 1087 if (!creating) 1088 inode_sb_list_add(inode); 1089 unlock: 1090 spin_unlock(&inode_hash_lock); 1091 1092 return inode; 1093 } 1094 EXPORT_SYMBOL(inode_insert5); 1095 1096 /** 1097 * iget5_locked - obtain an inode from a mounted file system 1098 * @sb: super block of file system 1099 * @hashval: hash value (usually inode number) to get 1100 * @test: callback used for comparisons between inodes 1101 * @set: callback used to initialize a new struct inode 1102 * @data: opaque data pointer to pass to @test and @set 1103 * 1104 * Search for the inode specified by @hashval and @data in the inode cache, 1105 * and if present it is return it with an increased reference count. This is 1106 * a generalized version of iget_locked() for file systems where the inode 1107 * number is not sufficient for unique identification of an inode. 1108 * 1109 * If the inode is not in cache, allocate a new inode and return it locked, 1110 * hashed, and with the I_NEW flag set. The file system gets to fill it in 1111 * before unlocking it via unlock_new_inode(). 1112 * 1113 * Note both @test and @set are called with the inode_hash_lock held, so can't 1114 * sleep. 1115 */ 1116 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, 1117 int (*test)(struct inode *, void *), 1118 int (*set)(struct inode *, void *), void *data) 1119 { 1120 struct inode *inode = ilookup5(sb, hashval, test, data); 1121 1122 if (!inode) { 1123 struct inode *new = alloc_inode(sb); 1124 1125 if (new) { 1126 new->i_state = 0; 1127 inode = inode_insert5(new, hashval, test, set, data); 1128 if (unlikely(inode != new)) 1129 destroy_inode(new); 1130 } 1131 } 1132 return inode; 1133 } 1134 EXPORT_SYMBOL(iget5_locked); 1135 1136 /** 1137 * iget_locked - obtain an inode from a mounted file system 1138 * @sb: super block of file system 1139 * @ino: inode number to get 1140 * 1141 * Search for the inode specified by @ino in the inode cache and if present 1142 * return it with an increased reference count. This is for file systems 1143 * where the inode number is sufficient for unique identification of an inode. 1144 * 1145 * If the inode is not in cache, allocate a new inode and return it locked, 1146 * hashed, and with the I_NEW flag set. The file system gets to fill it in 1147 * before unlocking it via unlock_new_inode(). 1148 */ 1149 struct inode *iget_locked(struct super_block *sb, unsigned long ino) 1150 { 1151 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1152 struct inode *inode; 1153 again: 1154 spin_lock(&inode_hash_lock); 1155 inode = find_inode_fast(sb, head, ino); 1156 spin_unlock(&inode_hash_lock); 1157 if (inode) { 1158 if (IS_ERR(inode)) 1159 return NULL; 1160 wait_on_inode(inode); 1161 if (unlikely(inode_unhashed(inode))) { 1162 iput(inode); 1163 goto again; 1164 } 1165 return inode; 1166 } 1167 1168 inode = alloc_inode(sb); 1169 if (inode) { 1170 struct inode *old; 1171 1172 spin_lock(&inode_hash_lock); 1173 /* We released the lock, so.. */ 1174 old = find_inode_fast(sb, head, ino); 1175 if (!old) { 1176 inode->i_ino = ino; 1177 spin_lock(&inode->i_lock); 1178 inode->i_state = I_NEW; 1179 hlist_add_head(&inode->i_hash, head); 1180 spin_unlock(&inode->i_lock); 1181 inode_sb_list_add(inode); 1182 spin_unlock(&inode_hash_lock); 1183 1184 /* Return the locked inode with I_NEW set, the 1185 * caller is responsible for filling in the contents 1186 */ 1187 return inode; 1188 } 1189 1190 /* 1191 * Uhhuh, somebody else created the same inode under 1192 * us. Use the old inode instead of the one we just 1193 * allocated. 1194 */ 1195 spin_unlock(&inode_hash_lock); 1196 destroy_inode(inode); 1197 if (IS_ERR(old)) 1198 return NULL; 1199 inode = old; 1200 wait_on_inode(inode); 1201 if (unlikely(inode_unhashed(inode))) { 1202 iput(inode); 1203 goto again; 1204 } 1205 } 1206 return inode; 1207 } 1208 EXPORT_SYMBOL(iget_locked); 1209 1210 /* 1211 * search the inode cache for a matching inode number. 1212 * If we find one, then the inode number we are trying to 1213 * allocate is not unique and so we should not use it. 1214 * 1215 * Returns 1 if the inode number is unique, 0 if it is not. 1216 */ 1217 static int test_inode_iunique(struct super_block *sb, unsigned long ino) 1218 { 1219 struct hlist_head *b = inode_hashtable + hash(sb, ino); 1220 struct inode *inode; 1221 1222 spin_lock(&inode_hash_lock); 1223 hlist_for_each_entry(inode, b, i_hash) { 1224 if (inode->i_ino == ino && inode->i_sb == sb) { 1225 spin_unlock(&inode_hash_lock); 1226 return 0; 1227 } 1228 } 1229 spin_unlock(&inode_hash_lock); 1230 1231 return 1; 1232 } 1233 1234 /** 1235 * iunique - get a unique inode number 1236 * @sb: superblock 1237 * @max_reserved: highest reserved inode number 1238 * 1239 * Obtain an inode number that is unique on the system for a given 1240 * superblock. This is used by file systems that have no natural 1241 * permanent inode numbering system. An inode number is returned that 1242 * is higher than the reserved limit but unique. 1243 * 1244 * BUGS: 1245 * With a large number of inodes live on the file system this function 1246 * currently becomes quite slow. 1247 */ 1248 ino_t iunique(struct super_block *sb, ino_t max_reserved) 1249 { 1250 /* 1251 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW 1252 * error if st_ino won't fit in target struct field. Use 32bit counter 1253 * here to attempt to avoid that. 1254 */ 1255 static DEFINE_SPINLOCK(iunique_lock); 1256 static unsigned int counter; 1257 ino_t res; 1258 1259 spin_lock(&iunique_lock); 1260 do { 1261 if (counter <= max_reserved) 1262 counter = max_reserved + 1; 1263 res = counter++; 1264 } while (!test_inode_iunique(sb, res)); 1265 spin_unlock(&iunique_lock); 1266 1267 return res; 1268 } 1269 EXPORT_SYMBOL(iunique); 1270 1271 struct inode *igrab(struct inode *inode) 1272 { 1273 spin_lock(&inode->i_lock); 1274 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) { 1275 __iget(inode); 1276 spin_unlock(&inode->i_lock); 1277 } else { 1278 spin_unlock(&inode->i_lock); 1279 /* 1280 * Handle the case where s_op->clear_inode is not been 1281 * called yet, and somebody is calling igrab 1282 * while the inode is getting freed. 1283 */ 1284 inode = NULL; 1285 } 1286 return inode; 1287 } 1288 EXPORT_SYMBOL(igrab); 1289 1290 /** 1291 * ilookup5_nowait - search for an inode in the inode cache 1292 * @sb: super block of file system to search 1293 * @hashval: hash value (usually inode number) to search for 1294 * @test: callback used for comparisons between inodes 1295 * @data: opaque data pointer to pass to @test 1296 * 1297 * Search for the inode specified by @hashval and @data in the inode cache. 1298 * If the inode is in the cache, the inode is returned with an incremented 1299 * reference count. 1300 * 1301 * Note: I_NEW is not waited upon so you have to be very careful what you do 1302 * with the returned inode. You probably should be using ilookup5() instead. 1303 * 1304 * Note2: @test is called with the inode_hash_lock held, so can't sleep. 1305 */ 1306 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, 1307 int (*test)(struct inode *, void *), void *data) 1308 { 1309 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1310 struct inode *inode; 1311 1312 spin_lock(&inode_hash_lock); 1313 inode = find_inode(sb, head, test, data); 1314 spin_unlock(&inode_hash_lock); 1315 1316 return IS_ERR(inode) ? NULL : inode; 1317 } 1318 EXPORT_SYMBOL(ilookup5_nowait); 1319 1320 /** 1321 * ilookup5 - search for an inode in the inode cache 1322 * @sb: super block of file system to search 1323 * @hashval: hash value (usually inode number) to search for 1324 * @test: callback used for comparisons between inodes 1325 * @data: opaque data pointer to pass to @test 1326 * 1327 * Search for the inode specified by @hashval and @data in the inode cache, 1328 * and if the inode is in the cache, return the inode with an incremented 1329 * reference count. Waits on I_NEW before returning the inode. 1330 * returned with an incremented reference count. 1331 * 1332 * This is a generalized version of ilookup() for file systems where the 1333 * inode number is not sufficient for unique identification of an inode. 1334 * 1335 * Note: @test is called with the inode_hash_lock held, so can't sleep. 1336 */ 1337 struct inode *ilookup5(struct super_block *sb, unsigned long hashval, 1338 int (*test)(struct inode *, void *), void *data) 1339 { 1340 struct inode *inode; 1341 again: 1342 inode = ilookup5_nowait(sb, hashval, test, data); 1343 if (inode) { 1344 wait_on_inode(inode); 1345 if (unlikely(inode_unhashed(inode))) { 1346 iput(inode); 1347 goto again; 1348 } 1349 } 1350 return inode; 1351 } 1352 EXPORT_SYMBOL(ilookup5); 1353 1354 /** 1355 * ilookup - search for an inode in the inode cache 1356 * @sb: super block of file system to search 1357 * @ino: inode number to search for 1358 * 1359 * Search for the inode @ino in the inode cache, and if the inode is in the 1360 * cache, the inode is returned with an incremented reference count. 1361 */ 1362 struct inode *ilookup(struct super_block *sb, unsigned long ino) 1363 { 1364 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1365 struct inode *inode; 1366 again: 1367 spin_lock(&inode_hash_lock); 1368 inode = find_inode_fast(sb, head, ino); 1369 spin_unlock(&inode_hash_lock); 1370 1371 if (inode) { 1372 if (IS_ERR(inode)) 1373 return NULL; 1374 wait_on_inode(inode); 1375 if (unlikely(inode_unhashed(inode))) { 1376 iput(inode); 1377 goto again; 1378 } 1379 } 1380 return inode; 1381 } 1382 EXPORT_SYMBOL(ilookup); 1383 1384 /** 1385 * find_inode_nowait - find an inode in the inode cache 1386 * @sb: super block of file system to search 1387 * @hashval: hash value (usually inode number) to search for 1388 * @match: callback used for comparisons between inodes 1389 * @data: opaque data pointer to pass to @match 1390 * 1391 * Search for the inode specified by @hashval and @data in the inode 1392 * cache, where the helper function @match will return 0 if the inode 1393 * does not match, 1 if the inode does match, and -1 if the search 1394 * should be stopped. The @match function must be responsible for 1395 * taking the i_lock spin_lock and checking i_state for an inode being 1396 * freed or being initialized, and incrementing the reference count 1397 * before returning 1. It also must not sleep, since it is called with 1398 * the inode_hash_lock spinlock held. 1399 * 1400 * This is a even more generalized version of ilookup5() when the 1401 * function must never block --- find_inode() can block in 1402 * __wait_on_freeing_inode() --- or when the caller can not increment 1403 * the reference count because the resulting iput() might cause an 1404 * inode eviction. The tradeoff is that the @match funtion must be 1405 * very carefully implemented. 1406 */ 1407 struct inode *find_inode_nowait(struct super_block *sb, 1408 unsigned long hashval, 1409 int (*match)(struct inode *, unsigned long, 1410 void *), 1411 void *data) 1412 { 1413 struct hlist_head *head = inode_hashtable + hash(sb, hashval); 1414 struct inode *inode, *ret_inode = NULL; 1415 int mval; 1416 1417 spin_lock(&inode_hash_lock); 1418 hlist_for_each_entry(inode, head, i_hash) { 1419 if (inode->i_sb != sb) 1420 continue; 1421 mval = match(inode, hashval, data); 1422 if (mval == 0) 1423 continue; 1424 if (mval == 1) 1425 ret_inode = inode; 1426 goto out; 1427 } 1428 out: 1429 spin_unlock(&inode_hash_lock); 1430 return ret_inode; 1431 } 1432 EXPORT_SYMBOL(find_inode_nowait); 1433 1434 int insert_inode_locked(struct inode *inode) 1435 { 1436 struct super_block *sb = inode->i_sb; 1437 ino_t ino = inode->i_ino; 1438 struct hlist_head *head = inode_hashtable + hash(sb, ino); 1439 1440 while (1) { 1441 struct inode *old = NULL; 1442 spin_lock(&inode_hash_lock); 1443 hlist_for_each_entry(old, head, i_hash) { 1444 if (old->i_ino != ino) 1445 continue; 1446 if (old->i_sb != sb) 1447 continue; 1448 spin_lock(&old->i_lock); 1449 if (old->i_state & (I_FREEING|I_WILL_FREE)) { 1450 spin_unlock(&old->i_lock); 1451 continue; 1452 } 1453 break; 1454 } 1455 if (likely(!old)) { 1456 spin_lock(&inode->i_lock); 1457 inode->i_state |= I_NEW | I_CREATING; 1458 hlist_add_head(&inode->i_hash, head); 1459 spin_unlock(&inode->i_lock); 1460 spin_unlock(&inode_hash_lock); 1461 return 0; 1462 } 1463 if (unlikely(old->i_state & I_CREATING)) { 1464 spin_unlock(&old->i_lock); 1465 spin_unlock(&inode_hash_lock); 1466 return -EBUSY; 1467 } 1468 __iget(old); 1469 spin_unlock(&old->i_lock); 1470 spin_unlock(&inode_hash_lock); 1471 wait_on_inode(old); 1472 if (unlikely(!inode_unhashed(old))) { 1473 iput(old); 1474 return -EBUSY; 1475 } 1476 iput(old); 1477 } 1478 } 1479 EXPORT_SYMBOL(insert_inode_locked); 1480 1481 int insert_inode_locked4(struct inode *inode, unsigned long hashval, 1482 int (*test)(struct inode *, void *), void *data) 1483 { 1484 struct inode *old; 1485 1486 inode->i_state |= I_CREATING; 1487 old = inode_insert5(inode, hashval, test, NULL, data); 1488 1489 if (old != inode) { 1490 iput(old); 1491 return -EBUSY; 1492 } 1493 return 0; 1494 } 1495 EXPORT_SYMBOL(insert_inode_locked4); 1496 1497 1498 int generic_delete_inode(struct inode *inode) 1499 { 1500 return 1; 1501 } 1502 EXPORT_SYMBOL(generic_delete_inode); 1503 1504 /* 1505 * Called when we're dropping the last reference 1506 * to an inode. 1507 * 1508 * Call the FS "drop_inode()" function, defaulting to 1509 * the legacy UNIX filesystem behaviour. If it tells 1510 * us to evict inode, do so. Otherwise, retain inode 1511 * in cache if fs is alive, sync and evict if fs is 1512 * shutting down. 1513 */ 1514 static void iput_final(struct inode *inode) 1515 { 1516 struct super_block *sb = inode->i_sb; 1517 const struct super_operations *op = inode->i_sb->s_op; 1518 int drop; 1519 1520 WARN_ON(inode->i_state & I_NEW); 1521 1522 if (op->drop_inode) 1523 drop = op->drop_inode(inode); 1524 else 1525 drop = generic_drop_inode(inode); 1526 1527 if (!drop && (sb->s_flags & SB_ACTIVE)) { 1528 inode_add_lru(inode); 1529 spin_unlock(&inode->i_lock); 1530 return; 1531 } 1532 1533 if (!drop) { 1534 inode->i_state |= I_WILL_FREE; 1535 spin_unlock(&inode->i_lock); 1536 write_inode_now(inode, 1); 1537 spin_lock(&inode->i_lock); 1538 WARN_ON(inode->i_state & I_NEW); 1539 inode->i_state &= ~I_WILL_FREE; 1540 } 1541 1542 inode->i_state |= I_FREEING; 1543 if (!list_empty(&inode->i_lru)) 1544 inode_lru_list_del(inode); 1545 spin_unlock(&inode->i_lock); 1546 1547 evict(inode); 1548 } 1549 1550 /** 1551 * iput - put an inode 1552 * @inode: inode to put 1553 * 1554 * Puts an inode, dropping its usage count. If the inode use count hits 1555 * zero, the inode is then freed and may also be destroyed. 1556 * 1557 * Consequently, iput() can sleep. 1558 */ 1559 void iput(struct inode *inode) 1560 { 1561 if (!inode) 1562 return; 1563 BUG_ON(inode->i_state & I_CLEAR); 1564 retry: 1565 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) { 1566 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) { 1567 atomic_inc(&inode->i_count); 1568 spin_unlock(&inode->i_lock); 1569 trace_writeback_lazytime_iput(inode); 1570 mark_inode_dirty_sync(inode); 1571 goto retry; 1572 } 1573 iput_final(inode); 1574 } 1575 } 1576 EXPORT_SYMBOL(iput); 1577 1578 /** 1579 * bmap - find a block number in a file 1580 * @inode: inode of file 1581 * @block: block to find 1582 * 1583 * Returns the block number on the device holding the inode that 1584 * is the disk block number for the block of the file requested. 1585 * That is, asked for block 4 of inode 1 the function will return the 1586 * disk block relative to the disk start that holds that block of the 1587 * file. 1588 */ 1589 sector_t bmap(struct inode *inode, sector_t block) 1590 { 1591 sector_t res = 0; 1592 if (inode->i_mapping->a_ops->bmap) 1593 res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block); 1594 return res; 1595 } 1596 EXPORT_SYMBOL(bmap); 1597 1598 /* 1599 * With relative atime, only update atime if the previous atime is 1600 * earlier than either the ctime or mtime or if at least a day has 1601 * passed since the last atime update. 1602 */ 1603 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, 1604 struct timespec now) 1605 { 1606 1607 if (!(mnt->mnt_flags & MNT_RELATIME)) 1608 return 1; 1609 /* 1610 * Is mtime younger than atime? If yes, update atime: 1611 */ 1612 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0) 1613 return 1; 1614 /* 1615 * Is ctime younger than atime? If yes, update atime: 1616 */ 1617 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0) 1618 return 1; 1619 1620 /* 1621 * Is the previous atime value older than a day? If yes, 1622 * update atime: 1623 */ 1624 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60) 1625 return 1; 1626 /* 1627 * Good, we can skip the atime update: 1628 */ 1629 return 0; 1630 } 1631 1632 int generic_update_time(struct inode *inode, struct timespec64 *time, int flags) 1633 { 1634 int iflags = I_DIRTY_TIME; 1635 bool dirty = false; 1636 1637 if (flags & S_ATIME) 1638 inode->i_atime = *time; 1639 if (flags & S_VERSION) 1640 dirty = inode_maybe_inc_iversion(inode, false); 1641 if (flags & S_CTIME) 1642 inode->i_ctime = *time; 1643 if (flags & S_MTIME) 1644 inode->i_mtime = *time; 1645 if ((flags & (S_ATIME | S_CTIME | S_MTIME)) && 1646 !(inode->i_sb->s_flags & SB_LAZYTIME)) 1647 dirty = true; 1648 1649 if (dirty) 1650 iflags |= I_DIRTY_SYNC; 1651 __mark_inode_dirty(inode, iflags); 1652 return 0; 1653 } 1654 EXPORT_SYMBOL(generic_update_time); 1655 1656 /* 1657 * This does the actual work of updating an inodes time or version. Must have 1658 * had called mnt_want_write() before calling this. 1659 */ 1660 static int update_time(struct inode *inode, struct timespec64 *time, int flags) 1661 { 1662 int (*update_time)(struct inode *, struct timespec64 *, int); 1663 1664 update_time = inode->i_op->update_time ? inode->i_op->update_time : 1665 generic_update_time; 1666 1667 return update_time(inode, time, flags); 1668 } 1669 1670 /** 1671 * touch_atime - update the access time 1672 * @path: the &struct path to update 1673 * @inode: inode to update 1674 * 1675 * Update the accessed time on an inode and mark it for writeback. 1676 * This function automatically handles read only file systems and media, 1677 * as well as the "noatime" flag and inode specific "noatime" markers. 1678 */ 1679 bool atime_needs_update(const struct path *path, struct inode *inode) 1680 { 1681 struct vfsmount *mnt = path->mnt; 1682 struct timespec64 now; 1683 1684 if (inode->i_flags & S_NOATIME) 1685 return false; 1686 1687 /* Atime updates will likely cause i_uid and i_gid to be written 1688 * back improprely if their true value is unknown to the vfs. 1689 */ 1690 if (HAS_UNMAPPED_ID(inode)) 1691 return false; 1692 1693 if (IS_NOATIME(inode)) 1694 return false; 1695 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)) 1696 return false; 1697 1698 if (mnt->mnt_flags & MNT_NOATIME) 1699 return false; 1700 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)) 1701 return false; 1702 1703 now = current_time(inode); 1704 1705 if (!relatime_need_update(mnt, inode, timespec64_to_timespec(now))) 1706 return false; 1707 1708 if (timespec64_equal(&inode->i_atime, &now)) 1709 return false; 1710 1711 return true; 1712 } 1713 1714 void touch_atime(const struct path *path) 1715 { 1716 struct vfsmount *mnt = path->mnt; 1717 struct inode *inode = d_inode(path->dentry); 1718 struct timespec64 now; 1719 1720 if (!atime_needs_update(path, inode)) 1721 return; 1722 1723 if (!sb_start_write_trylock(inode->i_sb)) 1724 return; 1725 1726 if (__mnt_want_write(mnt) != 0) 1727 goto skip_update; 1728 /* 1729 * File systems can error out when updating inodes if they need to 1730 * allocate new space to modify an inode (such is the case for 1731 * Btrfs), but since we touch atime while walking down the path we 1732 * really don't care if we failed to update the atime of the file, 1733 * so just ignore the return value. 1734 * We may also fail on filesystems that have the ability to make parts 1735 * of the fs read only, e.g. subvolumes in Btrfs. 1736 */ 1737 now = current_time(inode); 1738 update_time(inode, &now, S_ATIME); 1739 __mnt_drop_write(mnt); 1740 skip_update: 1741 sb_end_write(inode->i_sb); 1742 } 1743 EXPORT_SYMBOL(touch_atime); 1744 1745 /* 1746 * The logic we want is 1747 * 1748 * if suid or (sgid and xgrp) 1749 * remove privs 1750 */ 1751 int should_remove_suid(struct dentry *dentry) 1752 { 1753 umode_t mode = d_inode(dentry)->i_mode; 1754 int kill = 0; 1755 1756 /* suid always must be killed */ 1757 if (unlikely(mode & S_ISUID)) 1758 kill = ATTR_KILL_SUID; 1759 1760 /* 1761 * sgid without any exec bits is just a mandatory locking mark; leave 1762 * it alone. If some exec bits are set, it's a real sgid; kill it. 1763 */ 1764 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) 1765 kill |= ATTR_KILL_SGID; 1766 1767 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) 1768 return kill; 1769 1770 return 0; 1771 } 1772 EXPORT_SYMBOL(should_remove_suid); 1773 1774 /* 1775 * Return mask of changes for notify_change() that need to be done as a 1776 * response to write or truncate. Return 0 if nothing has to be changed. 1777 * Negative value on error (change should be denied). 1778 */ 1779 int dentry_needs_remove_privs(struct dentry *dentry) 1780 { 1781 struct inode *inode = d_inode(dentry); 1782 int mask = 0; 1783 int ret; 1784 1785 if (IS_NOSEC(inode)) 1786 return 0; 1787 1788 mask = should_remove_suid(dentry); 1789 ret = security_inode_need_killpriv(dentry); 1790 if (ret < 0) 1791 return ret; 1792 if (ret) 1793 mask |= ATTR_KILL_PRIV; 1794 return mask; 1795 } 1796 1797 static int __remove_privs(struct dentry *dentry, int kill) 1798 { 1799 struct iattr newattrs; 1800 1801 newattrs.ia_valid = ATTR_FORCE | kill; 1802 /* 1803 * Note we call this on write, so notify_change will not 1804 * encounter any conflicting delegations: 1805 */ 1806 return notify_change(dentry, &newattrs, NULL); 1807 } 1808 1809 /* 1810 * Remove special file priviledges (suid, capabilities) when file is written 1811 * to or truncated. 1812 */ 1813 int file_remove_privs(struct file *file) 1814 { 1815 struct dentry *dentry = file_dentry(file); 1816 struct inode *inode = file_inode(file); 1817 int kill; 1818 int error = 0; 1819 1820 /* Fast path for nothing security related */ 1821 if (IS_NOSEC(inode)) 1822 return 0; 1823 1824 kill = dentry_needs_remove_privs(dentry); 1825 if (kill < 0) 1826 return kill; 1827 if (kill) 1828 error = __remove_privs(dentry, kill); 1829 if (!error) 1830 inode_has_no_xattr(inode); 1831 1832 return error; 1833 } 1834 EXPORT_SYMBOL(file_remove_privs); 1835 1836 /** 1837 * file_update_time - update mtime and ctime time 1838 * @file: file accessed 1839 * 1840 * Update the mtime and ctime members of an inode and mark the inode 1841 * for writeback. Note that this function is meant exclusively for 1842 * usage in the file write path of filesystems, and filesystems may 1843 * choose to explicitly ignore update via this function with the 1844 * S_NOCMTIME inode flag, e.g. for network filesystem where these 1845 * timestamps are handled by the server. This can return an error for 1846 * file systems who need to allocate space in order to update an inode. 1847 */ 1848 1849 int file_update_time(struct file *file) 1850 { 1851 struct inode *inode = file_inode(file); 1852 struct timespec64 now; 1853 int sync_it = 0; 1854 int ret; 1855 1856 /* First try to exhaust all avenues to not sync */ 1857 if (IS_NOCMTIME(inode)) 1858 return 0; 1859 1860 now = current_time(inode); 1861 if (!timespec64_equal(&inode->i_mtime, &now)) 1862 sync_it = S_MTIME; 1863 1864 if (!timespec64_equal(&inode->i_ctime, &now)) 1865 sync_it |= S_CTIME; 1866 1867 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode)) 1868 sync_it |= S_VERSION; 1869 1870 if (!sync_it) 1871 return 0; 1872 1873 /* Finally allowed to write? Takes lock. */ 1874 if (__mnt_want_write_file(file)) 1875 return 0; 1876 1877 ret = update_time(inode, &now, sync_it); 1878 __mnt_drop_write_file(file); 1879 1880 return ret; 1881 } 1882 EXPORT_SYMBOL(file_update_time); 1883 1884 int inode_needs_sync(struct inode *inode) 1885 { 1886 if (IS_SYNC(inode)) 1887 return 1; 1888 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) 1889 return 1; 1890 return 0; 1891 } 1892 EXPORT_SYMBOL(inode_needs_sync); 1893 1894 /* 1895 * If we try to find an inode in the inode hash while it is being 1896 * deleted, we have to wait until the filesystem completes its 1897 * deletion before reporting that it isn't found. This function waits 1898 * until the deletion _might_ have completed. Callers are responsible 1899 * to recheck inode state. 1900 * 1901 * It doesn't matter if I_NEW is not set initially, a call to 1902 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list 1903 * will DTRT. 1904 */ 1905 static void __wait_on_freeing_inode(struct inode *inode) 1906 { 1907 wait_queue_head_t *wq; 1908 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW); 1909 wq = bit_waitqueue(&inode->i_state, __I_NEW); 1910 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); 1911 spin_unlock(&inode->i_lock); 1912 spin_unlock(&inode_hash_lock); 1913 schedule(); 1914 finish_wait(wq, &wait.wq_entry); 1915 spin_lock(&inode_hash_lock); 1916 } 1917 1918 static __initdata unsigned long ihash_entries; 1919 static int __init set_ihash_entries(char *str) 1920 { 1921 if (!str) 1922 return 0; 1923 ihash_entries = simple_strtoul(str, &str, 0); 1924 return 1; 1925 } 1926 __setup("ihash_entries=", set_ihash_entries); 1927 1928 /* 1929 * Initialize the waitqueues and inode hash table. 1930 */ 1931 void __init inode_init_early(void) 1932 { 1933 /* If hashes are distributed across NUMA nodes, defer 1934 * hash allocation until vmalloc space is available. 1935 */ 1936 if (hashdist) 1937 return; 1938 1939 inode_hashtable = 1940 alloc_large_system_hash("Inode-cache", 1941 sizeof(struct hlist_head), 1942 ihash_entries, 1943 14, 1944 HASH_EARLY | HASH_ZERO, 1945 &i_hash_shift, 1946 &i_hash_mask, 1947 0, 1948 0); 1949 } 1950 1951 void __init inode_init(void) 1952 { 1953 /* inode slab cache */ 1954 inode_cachep = kmem_cache_create("inode_cache", 1955 sizeof(struct inode), 1956 0, 1957 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 1958 SLAB_MEM_SPREAD|SLAB_ACCOUNT), 1959 init_once); 1960 1961 /* Hash may have been set up in inode_init_early */ 1962 if (!hashdist) 1963 return; 1964 1965 inode_hashtable = 1966 alloc_large_system_hash("Inode-cache", 1967 sizeof(struct hlist_head), 1968 ihash_entries, 1969 14, 1970 HASH_ZERO, 1971 &i_hash_shift, 1972 &i_hash_mask, 1973 0, 1974 0); 1975 } 1976 1977 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev) 1978 { 1979 inode->i_mode = mode; 1980 if (S_ISCHR(mode)) { 1981 inode->i_fop = &def_chr_fops; 1982 inode->i_rdev = rdev; 1983 } else if (S_ISBLK(mode)) { 1984 inode->i_fop = &def_blk_fops; 1985 inode->i_rdev = rdev; 1986 } else if (S_ISFIFO(mode)) 1987 inode->i_fop = &pipefifo_fops; 1988 else if (S_ISSOCK(mode)) 1989 ; /* leave it no_open_fops */ 1990 else 1991 printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for" 1992 " inode %s:%lu\n", mode, inode->i_sb->s_id, 1993 inode->i_ino); 1994 } 1995 EXPORT_SYMBOL(init_special_inode); 1996 1997 /** 1998 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards 1999 * @inode: New inode 2000 * @dir: Directory inode 2001 * @mode: mode of the new inode 2002 */ 2003 void inode_init_owner(struct inode *inode, const struct inode *dir, 2004 umode_t mode) 2005 { 2006 inode->i_uid = current_fsuid(); 2007 if (dir && dir->i_mode & S_ISGID) { 2008 inode->i_gid = dir->i_gid; 2009 2010 /* Directories are special, and always inherit S_ISGID */ 2011 if (S_ISDIR(mode)) 2012 mode |= S_ISGID; 2013 else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) && 2014 !in_group_p(inode->i_gid) && 2015 !capable_wrt_inode_uidgid(dir, CAP_FSETID)) 2016 mode &= ~S_ISGID; 2017 } else 2018 inode->i_gid = current_fsgid(); 2019 inode->i_mode = mode; 2020 } 2021 EXPORT_SYMBOL(inode_init_owner); 2022 2023 /** 2024 * inode_owner_or_capable - check current task permissions to inode 2025 * @inode: inode being checked 2026 * 2027 * Return true if current either has CAP_FOWNER in a namespace with the 2028 * inode owner uid mapped, or owns the file. 2029 */ 2030 bool inode_owner_or_capable(const struct inode *inode) 2031 { 2032 struct user_namespace *ns; 2033 2034 if (uid_eq(current_fsuid(), inode->i_uid)) 2035 return true; 2036 2037 ns = current_user_ns(); 2038 if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER)) 2039 return true; 2040 return false; 2041 } 2042 EXPORT_SYMBOL(inode_owner_or_capable); 2043 2044 /* 2045 * Direct i/o helper functions 2046 */ 2047 static void __inode_dio_wait(struct inode *inode) 2048 { 2049 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); 2050 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP); 2051 2052 do { 2053 prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE); 2054 if (atomic_read(&inode->i_dio_count)) 2055 schedule(); 2056 } while (atomic_read(&inode->i_dio_count)); 2057 finish_wait(wq, &q.wq_entry); 2058 } 2059 2060 /** 2061 * inode_dio_wait - wait for outstanding DIO requests to finish 2062 * @inode: inode to wait for 2063 * 2064 * Waits for all pending direct I/O requests to finish so that we can 2065 * proceed with a truncate or equivalent operation. 2066 * 2067 * Must be called under a lock that serializes taking new references 2068 * to i_dio_count, usually by inode->i_mutex. 2069 */ 2070 void inode_dio_wait(struct inode *inode) 2071 { 2072 if (atomic_read(&inode->i_dio_count)) 2073 __inode_dio_wait(inode); 2074 } 2075 EXPORT_SYMBOL(inode_dio_wait); 2076 2077 /* 2078 * inode_set_flags - atomically set some inode flags 2079 * 2080 * Note: the caller should be holding i_mutex, or else be sure that 2081 * they have exclusive access to the inode structure (i.e., while the 2082 * inode is being instantiated). The reason for the cmpxchg() loop 2083 * --- which wouldn't be necessary if all code paths which modify 2084 * i_flags actually followed this rule, is that there is at least one 2085 * code path which doesn't today so we use cmpxchg() out of an abundance 2086 * of caution. 2087 * 2088 * In the long run, i_mutex is overkill, and we should probably look 2089 * at using the i_lock spinlock to protect i_flags, and then make sure 2090 * it is so documented in include/linux/fs.h and that all code follows 2091 * the locking convention!! 2092 */ 2093 void inode_set_flags(struct inode *inode, unsigned int flags, 2094 unsigned int mask) 2095 { 2096 unsigned int old_flags, new_flags; 2097 2098 WARN_ON_ONCE(flags & ~mask); 2099 do { 2100 old_flags = READ_ONCE(inode->i_flags); 2101 new_flags = (old_flags & ~mask) | flags; 2102 } while (unlikely(cmpxchg(&inode->i_flags, old_flags, 2103 new_flags) != old_flags)); 2104 } 2105 EXPORT_SYMBOL(inode_set_flags); 2106 2107 void inode_nohighmem(struct inode *inode) 2108 { 2109 mapping_set_gfp_mask(inode->i_mapping, GFP_USER); 2110 } 2111 EXPORT_SYMBOL(inode_nohighmem); 2112 2113 /** 2114 * timespec64_trunc - Truncate timespec64 to a granularity 2115 * @t: Timespec64 2116 * @gran: Granularity in ns. 2117 * 2118 * Truncate a timespec64 to a granularity. Always rounds down. gran must 2119 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). 2120 */ 2121 struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran) 2122 { 2123 /* Avoid division in the common cases 1 ns and 1 s. */ 2124 if (gran == 1) { 2125 /* nothing */ 2126 } else if (gran == NSEC_PER_SEC) { 2127 t.tv_nsec = 0; 2128 } else if (gran > 1 && gran < NSEC_PER_SEC) { 2129 t.tv_nsec -= t.tv_nsec % gran; 2130 } else { 2131 WARN(1, "illegal file time granularity: %u", gran); 2132 } 2133 return t; 2134 } 2135 EXPORT_SYMBOL(timespec64_trunc); 2136 2137 /** 2138 * current_time - Return FS time 2139 * @inode: inode. 2140 * 2141 * Return the current time truncated to the time granularity supported by 2142 * the fs. 2143 * 2144 * Note that inode and inode->sb cannot be NULL. 2145 * Otherwise, the function warns and returns time without truncation. 2146 */ 2147 struct timespec64 current_time(struct inode *inode) 2148 { 2149 struct timespec64 now = current_kernel_time64(); 2150 2151 if (unlikely(!inode->i_sb)) { 2152 WARN(1, "current_time() called with uninitialized super_block in the inode"); 2153 return now; 2154 } 2155 2156 return timespec64_trunc(now, inode->i_sb->s_time_gran); 2157 } 2158 EXPORT_SYMBOL(current_time); 2159