/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
        return -ENXIO;
}
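
/*
 * Illustrative sketch (not part of this file): walking a superblock's
 * inode list while respecting the lock ordering documented above, where
 * inode->i_lock nests inside s_inode_list_lock. The some_filter()
 * helper is hypothetical; evict_inodes() below follows this pattern.
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		if (some_filter(inode))
 *			;	// ... act on the inode ...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 */
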
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations no_open_fops = {.open = no_open};
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
        inode->__i_nlink = 1;
        inode->i_opflags = 0;
        if (sb->s_xattr)
                inode->i_opflags |= IOP_XATTR;
        i_uid_write(inode, 0);
        i_gid_write(inode, 0);
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_write_hint = WRITE_LIFE_NOT_SET;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_link = NULL;
        inode->i_dir_seq = 0;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
        inode->i_wb_frn_winner = 0;
        inode->i_wb_frn_avg_time = 0;
        inode->i_wb_frn_history = 0;
#endif

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        init_rwsem(&inode->i_rwsem);
        lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        mapping->wb_err = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif
        inode->i_flctx = NULL;
        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        if (inode->free_inode)
                inode->free_inode(inode);
        else
                free_inode_nonrcu(inode);
}

static struct inode *alloc_inode(struct super_block *sb)
{
        const struct super_operations *ops = sb->s_op;
        struct inode *inode;

        if (ops->alloc_inode)
                inode = ops->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (ops->destroy_inode) {
                        ops->destroy_inode(inode);
                        if (!ops->free_inode)
                                return NULL;
                }
                inode->free_inode = ops->free_inode;
                i_callback(&inode->i_rcu);
                return NULL;
        }

        return inode;
}
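
/*
 * Illustrative sketch (not from this file): how a filesystem typically
 * pairs ->alloc_inode() with ->free_inode() so that alloc_inode() and
 * i_callback() above can allocate and RCU-free its private inode
 * container. "foofs" and its types/cache are hypothetical.
 *
 *	struct foofs_inode_info {
 *		unsigned long private_state;
 *		struct inode vfs_inode;
 *	};
 *
 *	static struct inode *foofs_alloc_inode(struct super_block *sb)
 *	{
 *		struct foofs_inode_info *fi =
 *			kmem_cache_alloc(foofs_inode_cachep, GFP_KERNEL);
 *		return fi ? &fi->vfs_inode : NULL;
 *	}
 *
 *	static void foofs_free_inode(struct inode *inode)
 *	{
 *		kmem_cache_free(foofs_inode_cachep,
 *				container_of(inode, struct foofs_inode_info,
 *					     vfs_inode));
 *	}
 */
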
void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        inode_detach_wb(inode);
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
        locks_free_lock_context(inode);
        if (!inode->i_nlink) {
                WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && !is_uncached_acl(inode->i_acl))
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void destroy_inode(struct inode *inode)
{
        const struct super_operations *ops = inode->i_sb->s_op;

        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (ops->destroy_inode) {
                ops->destroy_inode(inode);
                if (!ops->free_inode)
                        return;
        }
        inode->free_inode = ops->free_inode;
        call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
        WARN_ON(inode->i_nlink == 0);
        inode->__i_nlink--;
        if (!inode->i_nlink)
                atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
        if (inode->i_nlink) {
                inode->__i_nlink = 0;
                atomic_long_inc(&inode->i_sb->s_remove_count);
        }
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
        if (!nlink) {
                clear_nlink(inode);
        } else {
                /* Yes, some filesystems do change nlink from zero to one */
                if (inode->i_nlink == 0)
                        atomic_long_dec(&inode->i_sb->s_remove_count);

                inode->__i_nlink = nlink;
        }
}
EXPORT_SYMBOL(set_nlink);
/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
        if (unlikely(inode->i_nlink == 0)) {
                WARN_ON(!(inode->i_state & I_LINKABLE));
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

        inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

static void __address_space_init_once(struct address_space *mapping)
{
        xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ);
        init_rwsem(&mapping->i_mmap_rwsem);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        __address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_io_list);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
        __address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
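
/*
 * Illustrative sketch (not from this file): __iget() requires i_lock
 * and is only safe once the inode has been checked as live; this is
 * the pattern find_inode() below uses:
 *
 *	spin_lock(&inode->i_lock);
 *	if (inode->i_state & (I_FREEING | I_WILL_FREE)) {
 *		spin_unlock(&inode->i_lock);
 *		// inode is going away: do not take a reference
 *	} else {
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		// ... use the inode, then drop it with iput() ...
 *	}
 */
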
static void inode_lru_list_add(struct inode *inode)
{
        if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_inc(nr_unused);
        else
                inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
            !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
                inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
        if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        if (!list_empty(&inode->i_sb_list)) {
                spin_lock(&inode->i_sb->s_inode_list_lock);
                list_del_init(&inode->i_sb_list);
                spin_unlock(&inode->i_sb->s_inode_list_lock);
        }
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
        /*
         * We have to cycle the i_pages lock here because reclaim can be in the
         * process of removing the last page (in __delete_from_page_cache())
         * and we must not free the mapping under it.
         */
        xa_lock_irq(&inode->i_data.i_pages);
        BUG_ON(inode->i_data.nrpages);
        BUG_ON(inode->i_data.nrexceptional);
        xa_unlock_irq(&inode->i_data.i_pages);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        BUG_ON(!list_empty(&inode->i_wb_list));
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
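
/*
 * Illustrative note (not from this file): a filesystem that hashes
 * inodes by inode number can use the insert_inode_hash() wrapper from
 * <linux/fs.h> after instantiating an inode:
 *
 *	inode->i_ino = ino;
 *	insert_inode_hash(inode);	// __insert_inode_hash(inode, i_ino)
 *
 * and unhash it via remove_inode_hash() / __remove_inode_hash() when
 * the inode is torn down, as evict() does below.
 */
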
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        if (!list_empty(&inode->i_io_list))
                inode_io_list_del(inode);

        inode_sb_list_del(inode);

        /*
         * Wait for flusher thread to be done with the inode so that filesystem
         * does not start destroying it while writeback is still running. Since
         * the inode has I_FREEING set, flusher thread won't start new work on
         * the inode. We just have to wait for running writeback to finish.
         */
        inode_wait_for_writeback(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
                cond_resched();
        }
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having SB_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);

                /*
                 * We can have a ton of inodes to evict at unmount time given
                 * enough memory, check to see if we need to go to sleep for a
                 * bit so we don't livelock.
                 */
                if (need_resched()) {
                        spin_unlock(&sb->s_inode_list_lock);
                        cond_resched();
                        dispose_list(&dispose);
                        goto again;
                }
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);

        return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *freeable = arg;
        struct inode *inode = container_of(item, struct inode, i_lru);

        /*
         * we are inverting the lru lock/inode->i_lock here, so use a trylock.
         * If we fail to get the lock, just skip it.
         */
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;

        /*
         * Referenced or dirty inodes are still in use. Give them another pass
         * through the LRU as we cannot reclaim them now.
         */
        if (atomic_read(&inode->i_count) ||
            (inode->i_state & ~I_REFERENCED)) {
                list_lru_isolate(lru, &inode->i_lru);
                spin_unlock(&inode->i_lock);
                this_cpu_dec(nr_unused);
                return LRU_REMOVED;
        }

        /* recently referenced inodes get one more pass */
        if (inode->i_state & I_REFERENCED) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
        }

        if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(lru_lock);
                if (remove_inode_buffers(inode)) {
                        unsigned long reap;
                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
                        if (current_is_kswapd())
                                __count_vm_events(KSWAPD_INODESTEAL, reap);
                        else
                                __count_vm_events(PGINODESTEAL, reap);
                        if (current->reclaim_state)
                                current->reclaim_state->reclaimed_slab += reap;
                }
                iput(inode);
                spin_lock(lru_lock);
                return LRU_RETRY;
        }

        WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        list_lru_isolate_move(lru, &inode->i_lru, freeable);
        spin_unlock(&inode->i_lock);

        this_cpu_dec(nr_unused);
        return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
        LIST_HEAD(freeable);
        long freed;

        freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
                                     inode_lru_isolate, &freeable);
        dispose_list(&freeable);
        return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                if (unlikely(inode->i_state & I_CREATING)) {
                        spin_unlock(&inode->i_lock);
                        return ERR_PTR(-ESTALE);
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
                                     struct hlist_head *head, unsigned long ino)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                if (unlikely(inode->i_state & I_CREATING)) {
                        spin_unlock(&inode->i_lock);
                        return ERR_PTR(-ESTALE);
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}
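
/*
 * Illustrative sketch (not from this file): prune_icache_sb() above is
 * normally driven by the per-superblock shrinker callback rather than
 * called directly. A caller would describe the scan in a
 * shrink_control, roughly:
 *
 *	struct shrink_control sc = {
 *		.gfp_mask = GFP_KERNEL,
 *		.nr_to_scan = 128,	// arbitrary example batch size
 *	};
 *	long freed = prune_icache_sb(sb, &sc);
 */
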
/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        res++;
        /* get_next_ino should not provide a 0 inode number */
        if (unlikely(!res))
                res++;
        *p = res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * Inode won't be chained in superblock s_inodes list.
 * This means:
 * - fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
        struct inode *inode = alloc_inode(sb);

        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                INIT_LIST_HEAD(&inode->i_sb_list);
        }
        return inode;
}
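
/*
 * Illustrative sketch (not from this file): pseudo filesystems with no
 * stable on-disk inode numbers typically combine new_inode() (or
 * new_inode_pseudo()) with get_next_ino():
 *
 *	inode = new_inode(sb);
 *	if (inode) {
 *		inode->i_ino = get_next_ino();
 *		inode->i_atime = inode->i_mtime = inode->i_ctime =
 *						current_time(inode);
 *	}
 */
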
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&sb->s_inode_list_lock);

        inode = new_inode_pseudo(sb);
        if (inode)
                inode_sb_list_add(inode);
        return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        // mutex_destroy(&inode->i_mutex);
                        init_rwsem(&inode->i_rwsem);
                        lockdep_set_class(&inode->i_rwsem,
                                          &type->i_mutex_dir_key);
                }
        }
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW & ~I_CREATING;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

void discard_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
        iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);
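
/*
 * Illustrative sketch (not from this file): one possible create path
 * that publishes an inode with insert_inode_locked() (defined further
 * down) and backs out with discard_new_inode() on failure. The
 * foofs_write_new_inode() helper is hypothetical.
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0)
 *		goto fail;
 *	err = foofs_write_new_inode(inode);
 *	if (err) {
 *		discard_new_inode(inode);	// clears I_NEW, wakes waiters, iputs
 *		return err;
 *	}
 *	unlock_new_inode(inode);
 */
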
/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 > inode2)
                swap(inode1, inode2);

        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_lock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_unlock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode: pre-allocated inode to use for insert to cache
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() for callers that don't want to fail on memory
 * allocation of inode.
 *
 * If the inode is not in cache, insert the pre-allocated inode to cache and
 * return it locked, hashed, and with the I_NEW flag set. The file system gets
 * to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
                            int (*test)(struct inode *, void *),
                            int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
        struct inode *old;
        bool creating = inode->i_state & I_CREATING;

again:
        spin_lock(&inode_hash_lock);
        old = find_inode(inode->i_sb, head, test, data);
        if (unlikely(old)) {
                /*
                 * Uhhuh, somebody else created the same inode under us.
                 * Use the old inode instead of the preallocated one.
                 */
                spin_unlock(&inode_hash_lock);
                if (IS_ERR(old))
                        return NULL;
                wait_on_inode(old);
                if (unlikely(inode_unhashed(old))) {
                        iput(old);
                        goto again;
                }
                return old;
        }

        if (set && unlikely(set(inode, data))) {
                inode = NULL;
                goto unlock;
        }

        /*
         * Return the locked inode with I_NEW set, the
         * caller is responsible for filling in the contents
         */
        spin_lock(&inode->i_lock);
        inode->i_state |= I_NEW;
        hlist_add_head(&inode->i_hash, head);
        spin_unlock(&inode->i_lock);
        if (!creating)
                inode_sb_list_add(inode);
unlock:
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(inode_insert5);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                           int (*test)(struct inode *, void *),
                           int (*set)(struct inode *, void *), void *data)
{
        struct inode *inode = ilookup5(sb, hashval, test, data);

        if (!inode) {
                struct inode *new = alloc_inode(sb);

                if (new) {
                        new->i_state = 0;
                        inode = inode_insert5(new, hashval, test, set, data);
                        if (unlikely(inode != new))
                                destroy_inode(new);
                }
        }
        return inode;
}
EXPORT_SYMBOL(iget5_locked);
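
/*
 * Illustrative sketch (not from this file): iget5_locked() callers
 * supply @test and @set callbacks keyed on fs-private data. Both run
 * under inode_hash_lock, so neither may sleep. All foofs_* names are
 * hypothetical.
 *
 *	struct foofs_key { u64 object_id; };
 *
 *	static int foofs_test(struct inode *inode, void *data)
 *	{
 *		return FOOFS_I(inode)->object_id ==
 *			((struct foofs_key *)data)->object_id;
 *	}
 *
 *	static int foofs_set(struct inode *inode, void *data)
 *	{
 *		FOOFS_I(inode)->object_id =
 *			((struct foofs_key *)data)->object_id;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(key.object_id, 32),
 *			     foofs_test, foofs_set, &key);
 */
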
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                if (IS_ERR(inode))
                        return NULL;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                if (IS_ERR(old))
                        return NULL;
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}
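
/*
 * Illustrative sketch (not from this file): the canonical iget_locked()
 * pattern for filesystems keyed purely by inode number. Only an inode
 * that still has I_NEW set needs to be read in from backing store;
 * foofs_read_inode() is a hypothetical helper and iget_failed() is the
 * stock error-path helper defined elsewhere in the VFS.
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		// cache hit, fully initialised
 *	if (foofs_read_inode(inode)) {
 *		iget_failed(inode);
 *		return ERR_PTR(-EIO);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */
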
/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                              int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return IS_ERR(inode) ? NULL : inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
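
/*
 * Illustrative sketch (not from this file): igrab() is the "take a
 * reference only if the inode is still alive" primitive, useful when
 * revalidating a non-owning pointer to an inode:
 *
 *	struct inode *inode = igrab(candidate);
 *	if (!inode)
 *		return -ESTALE;		// inode was being freed
 *	// ... safe to use the inode here ...
 *	iput(inode);
 */
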
/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                       int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode;
again:
        inode = ilookup5_nowait(sb, hashval, test, data);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                if (IS_ERR(inode))
                        return NULL;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
                                unsigned long hashval,
                                int (*match)(struct inode *, unsigned long,
                                             void *),
                                void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode, *ret_inode = NULL;
        int mval;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                mval = match(inode, hashval, data);
                if (mval == 0)
                        continue;
                if (mval == 1)
                        ret_inode = inode;
                goto out;
        }
out:
        spin_unlock(&inode_hash_lock);
        return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW | I_CREATING;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                if (unlikely(old->i_state & I_CREATING)) {
                        spin_unlock(&old->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return -EBUSY;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                         int (*test)(struct inode *, void *), void *data)
{
        struct inode *old;

        inode->i_state |= I_CREATING;
        old = inode_insert5(inode, hashval, test, NULL, data);

        if (old != inode) {
                iput(old);
                return -EBUSY;
        }
        return 0;
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
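
/*
 * Illustrative sketch (not from this file): a @match callback for
 * find_inode_nowait() that takes the reference itself, as the kerneldoc
 * above requires. All foofs_* names are hypothetical.
 *
 *	static int foofs_match(struct inode *inode, unsigned long hashval,
 *			       void *data)
 *	{
 *		if (FOOFS_I(inode)->object_id != *(u64 *)data)
 *			return 0;	// keep searching
 *		spin_lock(&inode->i_lock);
 *		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
 *			spin_unlock(&inode->i_lock);
 *			return -1;	// stop: it exists but is unusable
 *		}
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		return 1;		// found; reference already taken
 *	}
 */
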
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict inode, do so. Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & SB_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        if (!list_empty(&inode->i_lru))
                inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (!inode)
                return;
        BUG_ON(inode->i_state & I_CLEAR);
retry:
        if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
                if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
                        atomic_inc(&inode->i_count);
                        spin_unlock(&inode->i_lock);
                        trace_writeback_lazytime_iput(inode);
                        mark_inode_dirty_sync(inode);
                        goto retry;
                }
                iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}
EXPORT_SYMBOL(bmap);
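
/*
 * Illustrative sketch (not from this file): mapping the first block of
 * a file to a device block with bmap(). A result of 0 means no mapping
 * was found (a hole, or the filesystem has no ->bmap).
 *
 *	sector_t dev_block = bmap(inode, 0);
 *	if (!dev_block)
 *		pr_debug("block 0 of inode %lu is unmapped\n", inode->i_ino);
 */
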
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
                                struct timespec64 now)
{
        if (!(mnt->mnt_flags & MNT_RELATIME))
                return 1;
        /*
         * Is mtime younger than atime? If yes, update atime:
         */
        if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
                return 1;
        /*
         * Is ctime younger than atime? If yes, update atime:
         */
        if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
                return 1;

        /*
         * Is the previous atime value older than a day? If yes,
         * update atime:
         */
        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
                return 1;
        /*
         * Good, we can skip the atime update:
         */
        return 0;
}

int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
        int iflags = I_DIRTY_TIME;
        bool dirty = false;

        if (flags & S_ATIME)
                inode->i_atime = *time;
        if (flags & S_VERSION)
                dirty = inode_maybe_inc_iversion(inode, false);
        if (flags & S_CTIME)
                inode->i_ctime = *time;
        if (flags & S_MTIME)
                inode->i_mtime = *time;
        if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
            !(inode->i_sb->s_flags & SB_LAZYTIME))
                dirty = true;

        if (dirty)
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec64 *time, int flags)
{
        int (*update_time)(struct inode *, struct timespec64 *, int);

        update_time = inode->i_op->update_time ? inode->i_op->update_time :
                                                 generic_update_time;

        return update_time(inode, time, flags);
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
        struct vfsmount *mnt = path->mnt;
        struct timespec64 now;

        if (inode->i_flags & S_NOATIME)
                return false;

        /* Atime updates will likely cause i_uid and i_gid to be written
         * back improperly if their true value is unknown to the vfs.
         */
        if (HAS_UNMAPPED_ID(inode))
                return false;

        if (IS_NOATIME(inode))
                return false;
        if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        if (mnt->mnt_flags & MNT_NOATIME)
                return false;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        now = current_time(inode);

        if (!relatime_need_update(mnt, inode, now))
                return false;

        if (timespec64_equal(&inode->i_atime, &now))
                return false;

        return true;
}

void touch_atime(const struct path *path)
{
        struct vfsmount *mnt = path->mnt;
        struct inode *inode = d_inode(path->dentry);
        struct timespec64 now;

        if (!atime_needs_update(path, inode))
                return;

        if (!sb_start_write_trylock(inode->i_sb))
                return;

        if (__mnt_want_write(mnt) != 0)
                goto skip_update;
        /*
         * File systems can error out when updating inodes if they need to
         * allocate new space to modify an inode (such is the case for
         * Btrfs), but since we touch atime while walking down the path we
         * really don't care if we failed to update the atime of the file,
         * so just ignore the return value.
         * We may also fail on filesystems that have the ability to make parts
         * of the fs read only, e.g. subvolumes in Btrfs.
         */
        now = current_time(inode);
        update_time(inode, &now, S_ATIME);
        __mnt_drop_write(mnt);
skip_update:
        sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
        umode_t mode = d_inode(dentry)->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone. If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;

        return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);
        int mask = 0;
        int ret;

        if (IS_NOSEC(inode))
                return 0;

        mask = should_remove_suid(dentry);
        ret = security_inode_need_killpriv(dentry);
        if (ret < 0)
                return ret;
        if (ret)
                mask |= ATTR_KILL_PRIV;
        return mask;
}

static int __remove_privs(struct dentry *dentry, int kill)
{
        struct iattr newattrs;

        newattrs.ia_valid = ATTR_FORCE | kill;
        /*
         * Note we call this on write, so notify_change will not
         * encounter any conflicting delegations:
         */
        return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = file_inode(file);
        int kill;
        int error = 0;

        /*
         * Fast path for nothing security related.
         * As well for non-regular files, e.g. blkdev inodes.
         * For example, blkdev_write_iter() might get here
         * trying to remove privs which it is not allowed to.
         */
        if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
                return 0;

        kill = dentry_needs_remove_privs(dentry);
        if (kill < 0)
                return kill;
        if (kill)
                error = __remove_privs(dentry, kill);
        if (!error)
                inode_has_no_xattr(inode);

        return error;
}
EXPORT_SYMBOL(file_remove_privs);
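
/*
 * Illustrative sketch (not from this file): the generic write path
 * drops privileges and then updates timestamps before copying data.
 * A filesystem implementing its own ->write_iter would follow the
 * same order:
 *
 *	err = file_remove_privs(file);	// kill suid/caps if needed
 *	if (err)
 *		return err;
 *	err = file_update_time(file);	// see below
 *	if (err)
 *		return err;
 *	// ... actually write the data ...
 */
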
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback. Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore update via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystem where these
 * timestamps are handled by the server. This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
        struct inode *inode = file_inode(file);
        struct timespec64 now;
        int sync_it = 0;
        int ret;

        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
                return 0;

        now = current_time(inode);
        if (!timespec64_equal(&inode->i_mtime, &now))
                sync_it = S_MTIME;

        if (!timespec64_equal(&inode->i_ctime, &now))
                sync_it |= S_CTIME;

        if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
                sync_it |= S_VERSION;

        if (!sync_it)
                return 0;

        /* Finally allowed to write? Takes lock. */
        if (__mnt_want_write_file(file))
                return 0;

        ret = update_time(inode, &now, sync_it);
        __mnt_drop_write_file(file);

        return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq, &wait.wq_entry);
        spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);
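
/*
 * Illustrative note (not from this file): the inode hash size can be
 * forced on the kernel command line, e.g. booting with
 *
 *	ihash_entries=131072
 *
 * sizes the inode hash table from that value instead of scaling it
 * from available memory.
 */
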
/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY | HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}

void __init inode_init(void)
{
	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_ZERO,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
		      umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;

		/* Directories are special, and always inherit S_ISGID */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
			 !in_group_p(inode->i_gid) &&
			 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
			mode &= ~S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
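/*
 * Editorial sketch (not part of the original file): a minimal inode
 * allocation path in a simple filesystem, showing where
 * inode_init_owner() applies the ownership and setgid policy above.
 * The function name is hypothetical; compare ramfs_get_inode().
 */
#if 0
static struct inode *example_get_inode(struct super_block *sb,
				       const struct inode *dir, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	/* uid from fsuid; gid and S_ISGID handling derived from @dir */
	inode_init_owner(inode, dir, mode);
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	return inode;
}
#endif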
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wq_entry);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop
 * (which wouldn't be necessary if all code paths which modify i_flags
 * actually followed this rule) is that there is at least one code path
 * which doesn't today, so we use cmpxchg() out of an abundance of
 * caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	WARN_ON_ONCE(flags & ~mask);
	set_mask_bits(&inode->i_flags, mask, flags);
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

/**
 * timespec64_trunc - Truncate timespec64 to a granularity
 * @t: Timespec64
 * @gran: Granularity in ns.
 *
 * Truncate a timespec64 to a granularity. Always rounds down. gran must
 * be neither 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran)
{
	/* Avoid division in the common cases 1 ns and 1 s. */
	if (gran == 1) {
		/* nothing */
	} else if (gran == NSEC_PER_SEC) {
		t.tv_nsec = 0;
	} else if (gran > 1 && gran < NSEC_PER_SEC) {
		t.tv_nsec -= t.tv_nsec % gran;
	} else {
		WARN(1, "illegal file time granularity: %u", gran);
	}
	return t;
}
EXPORT_SYMBOL(timespec64_trunc);
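/*
 * Editorial example (not part of the original file): with a 1 us
 * granularity (gran == 1000), only the nanosecond field is rounded
 * down to the granule boundary:
 *
 *	struct timespec64 t = { .tv_sec = 5, .tv_nsec = 1234567 };
 *
 *	t = timespec64_trunc(t, 1000);
 *	t.tv_sec is still 5, t.tv_nsec is now 1234000
 */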
/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that inode->i_sb should never be NULL; if it is, the function
 * warns and returns the time without truncation.
 */
struct timespec64 current_time(struct inode *inode)
{
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);

	if (unlikely(!inode->i_sb)) {
		WARN(1, "current_time() called with uninitialized super_block in the inode");
		return now;
	}

	return timespec64_trunc(now, inode->i_sb->s_time_gran);
}
EXPORT_SYMBOL(current_time);
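/*
 * Editorial sketch (not part of the original file): a filesystem opts
 * into fine-grained timestamps by setting s_time_gran in its fill_super
 * callback; current_time() above then truncates to that resolution.
 * The function name is hypothetical; s_time_gran is the real
 * super_block field.
 */
#if 0
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_time_gran = 1;	/* nanosecond timestamps, as ext4 uses */
	/* ... the rest of the usual fill_super work ... */
	return 0;
}
#endif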