/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode->i_flctx);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
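
/*
 * Illustrative sketch, not part of the helpers above: a disk filesystem
 * would typically feed its raw on-disk link count through set_nlink()
 * when reading an inode (the raw_inode field name here is hypothetical):
 *
 *	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 *
 * and pair inc_nlink()/drop_nlink() in its link()/unlink() methods so
 * that the superblock's s_remove_count stays balanced.
 */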

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the inode hash for its superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
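
/*
 * Sketch of typical get_next_ino() usage: an in-memory filesystem with
 * no on-disk inode numbers stamps each new inode as it allocates it
 * (the surrounding code is hypothetical):
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * Note the counter is only 32 bits and shared across all such
 * filesystems, so the numbers can eventually wrap and repeat.
 */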

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
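
/*
 * A minimal sketch of the note above: a filesystem whose page cache
 * pages must not come from highmem would follow new_inode() with
 * mapping_set_gfp_mask() (this caller is hypothetical):
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */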

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
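
/*
 * Sketch of how a cross-inode operation would use the pair above
 * (the surrounding exchange code is hypothetical):
 *
 *	lock_two_nondirectories(source, target);
 *	... swap or splice the two inodes' contents ...
 *	unlock_two_nondirectories(source, target);
 *
 * Sorting by inode address in lock_two_nondirectories() is what keeps
 * two concurrent callers with swapped arguments from deadlocking.
 */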

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
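
/*
 * Sketch of the iget5_locked() calling convention; MY_I(), my_test(),
 * my_set() and the fileid key are all hypothetical:
 *
 *	static int my_test(struct inode *inode, void *data)
 *	{
 *		return MY_I(inode)->fileid == *(u64 *)data;
 *	}
 *
 *	static int my_set(struct inode *inode, void *data)
 *	{
 *		MY_I(inode)->fileid = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash, my_test, my_set, &fileid);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		... fill in the rest of the inode ...
 *		unlock_new_inode(inode);
 *	}
 */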

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
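
/*
 * The canonical read-an-inode pattern built on iget_locked(); the
 * my_read_inode() helper is hypothetical, iget_failed() is the real
 * helper for aborting a half-built I_NEW inode:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		(cache hit, already initialised)
 *	err = my_read_inode(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */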

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
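
/*
 * Sketch: a filesystem with no permanent inode numbers (numbers up to
 * MY_MAX_RESERVED assumed reserved here) would stamp a new inode with:
 *
 *	inode->i_ino = iunique(sb, MY_MAX_RESERVED);
 *
 * Since iunique() consults only the inode hash, this is reliable only
 * for filesystems that keep all of their live inodes hashed.
 */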

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
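
/*
 * Sketch of the create-side pattern around insert_inode_locked(); the
 * my_alloc_ino() helper is hypothetical.  Hashing the inode with I_NEW
 * set makes concurrent lookups wait instead of seeing a half-built inode:
 *
 *	inode->i_ino = my_alloc_ino(sb);
 *	err = insert_inode_locked(inode);
 *	if (err) {
 *		... the number is already taken; undo and bail out ...
 *		iput(inode);
 *		return err;
 *	}
 *	... initialise and write out the new inode ...
 *	unlock_new_inode(inode);
 */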

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
		generic_update_time;

	return update_time(inode, time, flags);
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;
	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate.  Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}
EXPORT_SYMBOL(dentry_needs_remove_privs);

static int __remove_privs(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	int kill;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill = file_needs_remove_privs(file);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.  This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
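
/*
 * Sketch of where file_update_time() sits in a write path (the
 * surrounding function is hypothetical; compare the generic write
 * helpers in mm/filemap.c):
 *
 *	static ssize_t my_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct file *file = iocb->ki_filp;
 *		int err;
 *		...
 *		err = file_update_time(file);
 *		if (err)
 *			return err;
 *		... copy in the data ...
 *	}
 */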

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
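
/*
 * Sketch of the usual call site: a truncate-like path takes i_mutex
 * (so no new direct I/O can start) and then drains what is in flight
 * (the surrounding code is hypothetical):
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);
 *	... shrink i_size and truncate the page cache ...
 *	mutex_unlock(&inode->i_mutex);
 */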

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today, so we use cmpxchg() out of an abundance
 * of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
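
/*
 * Sketch: propagating on-disk flags into i_flags without disturbing
 * bits outside the mask (the MY_*_FL flag names are hypothetical):
 *
 *	unsigned int flags = 0;
 *
 *	if (my_flags & MY_SYNC_FL)
 *		flags |= S_SYNC;
 *	if (my_flags & MY_NOATIME_FL)
 *		flags |= S_NOATIME;
 *	inode_set_flags(inode, flags, S_SYNC | S_NOATIME);
 */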