/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering.
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();

	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle the nr_inodes sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}
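/*
 * Purely illustrative sketch (nothing calls it): the nesting below
 * follows the "inode_hash_lock -> inode->i_lock" ordering documented
 * at the top of this file; __insert_inode_hash() and
 * __remove_inode_hash() later in this file are the real users of
 * this order.
 */
static __maybe_unused void inode_lock_ordering_sketch(struct inode *inode)
{
	spin_lock(&inode_hash_lock);	/* outer lock */
	spin_lock(&inode->i_lock);	/* nests inside the hash lock */
	/* ... i_hash / i_state manipulation would go here ... */
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}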
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}
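/*
 * Sketch of the embedding pattern ->alloc_inode() implementations use:
 * a hypothetical foo_inode_info wraps the VFS inode in a fs-private
 * slab object, and alloc_inode() above then runs inode_init_always()
 * on the returned inode. All foo_* names are made up; foo_inode_cachep
 * would be created in the fs's module init with a constructor that
 * calls inode_init_once() (see further down in this file).
 */
struct foo_inode_info {
	unsigned long	foo_flags;	/* fs-private state */
	struct inode	vfs_inode;	/* the VFS inode embedded within */
};

static struct kmem_cache *foo_inode_cachep __maybe_unused;

static __maybe_unused struct inode *foo_alloc_inode(struct super_block *sb)
{
	struct foo_inode_info *fi;

	fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
	if (!fi)
		return NULL;
	fi->foo_flags = 0;
	return &fi->vfs_inode;	/* inode_init_always() is run on this */
}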
void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode->i_flctx);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
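/*
 * Sketch: how a filesystem's unlink path is meant to consume the nlink
 * helpers above rather than poking i_nlink directly. foo_unlink() is
 * hypothetical; the drop_nlink() + mark_inode_dirty() pairing is the
 * conventional shape.
 */
static int __maybe_unused foo_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);	/* may drive i_nlink to zero -> s_remove_count */
	mark_inode_dirty(inode);
	return 0;
}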
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
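/*
 * Sketch: a filesystem hashing a freshly built inode under a key other
 * than i_ino (compare iget5_locked() below, which does this under the
 * hash lock itself). foo_hash_new_inode() and the 64-bit object id are
 * hypothetical; the only requirement is that the same hashval be used
 * for later lookups.
 */
static void __maybe_unused foo_hash_new_inode(struct inode *inode, u64 objectid)
{
	inode->i_ino = (unsigned long)objectid;	/* best-effort i_ino */
	__insert_inode_hash(inode, (unsigned long)objectid);
}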
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the inode hash table.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode. We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
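/*
 * Sketch: the usual shape of a ->evict_inode() implementation, per the
 * contract evict() relies on above. foo_evict_inode() is hypothetical;
 * a real implementation frees on-disk data for unlinked inodes before
 * the final clear_inode().
 */
static void __maybe_unused foo_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);	/* drop the pagecache */
	if (!inode->i_nlink) {
		/* deallocate the on-disk inode here (hypothetical) */
	}
	clear_inode(inode);	/* must be the last state transition */
}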
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final().  When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed.  This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering.  Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;

			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
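/*
 * Sketch: get_next_ino() is for pseudo filesystems that mint inodes
 * with no stable on-disk identity (shmem/ramfs style). foo_get_inode()
 * is hypothetical; note it pairs with new_inode() below.
 */
static __maybe_unused struct inode *foo_get_inode(struct super_block *sb,
						  umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();	/* per-cpu batched counter */
		inode->i_mode = mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	}
	return inode;
}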
/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode will not be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
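/*
 * Sketch: the intended calling pattern for the helpers above, e.g. an
 * exchange-style operation over two regular files. foo_swap_private()
 * is hypothetical; address ordering and the inode1 == inode2 case are
 * handled inside the helpers, not by the caller.
 */
static void __maybe_unused foo_swap_private(struct inode *a, struct inode *b)
{
	lock_two_nondirectories(a, b);
	/* both i_mutexes held (one, if a == b): swap fs-private state */
	swap(a->i_private, b->i_private);
	unlock_two_nondirectories(a, b);
}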
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
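/*
 * Sketch: typical iget5_locked() usage for a filesystem keyed by a
 * 64-bit object id that may not fit in i_ino. All foo_* names are
 * hypothetical, and the id is stashed in i_private purely for the
 * sketch (a real fs keys off its own inode info). @test and @set run
 * under inode_hash_lock, so neither sleeps.
 */
struct foo_iget_args {
	u64 objectid;
};

static int foo_inode_test(struct inode *inode, void *opaque)
{
	struct foo_iget_args *args = opaque;

	return (u64)(unsigned long)inode->i_private == args->objectid;
}

static int foo_inode_set(struct inode *inode, void *opaque)
{
	struct foo_iget_args *args = opaque;

	inode->i_private = (void *)(unsigned long)args->objectid;
	inode->i_ino = (unsigned long)args->objectid;	/* possibly truncated */
	return 0;
}

static __maybe_unused struct inode *foo_iget(struct super_block *sb,
					     u64 objectid)
{
	struct foo_iget_args args = { .objectid = objectid };
	struct inode *inode;

	inode = iget5_locked(sb, (unsigned long)objectid,
			     foo_inode_test, foo_inode_set, &args);
	if (inode && (inode->i_state & I_NEW)) {
		/* read the inode in from backing store (hypothetical) ... */
		unlock_new_inode(inode);
	}
	return inode;
}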
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
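/*
 * Sketch: the canonical iget_locked() call site for a simple disk
 * filesystem. foo_iget_simple() and the disk read are hypothetical;
 * the I_NEW test plus unlock_new_inode() is the part the interface
 * requires of every caller.
 */
static __maybe_unused struct inode *foo_iget_simple(struct super_block *sb,
						    unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return NULL;			/* allocation failure */
	if (!(inode->i_state & I_NEW))
		return inode;			/* cache hit, ready to use */

	/* cache miss: read the on-disk inode (hypothetical), then publish */
	unlock_new_inode(inode);
	return inode;
}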
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
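/*
 * Sketch: the create-side counterpart of iget_locked(): a filesystem
 * that just allocated on-disk inode @ino hashes the in-core inode with
 * insert_inode_locked() before anyone else can look it up, and backs
 * out on -EBUSY. foo_create_inode() is hypothetical.
 */
static __maybe_unused struct inode *foo_create_inode(struct super_block *sb,
						     unsigned long ino)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;
	inode->i_ino = ino;
	if (insert_inode_locked(inode) < 0) {
		/* @ino is already live in the icache: give ours back */
		iput(inode);
		return NULL;
	}
	/* fill in and persist the new inode (hypothetical), then publish */
	unlock_new_inode(inode);
	return inode;
}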
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict the inode, do so.  Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, if asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
						 generic_update_time;

	return update_time(inode, time, flags);
}
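/*
 * Sketch: the ->update_time() hook that update_time() above dispatches
 * to. A filesystem that needs to veto or journal timestamp updates
 * supplies one and usually ends by delegating to generic_update_time().
 * foo_update_time() and the read-only check are hypothetical.
 */
static int __maybe_unused foo_update_time(struct inode *inode,
					  struct timespec *time, int flags)
{
	if (IS_RDONLY(inode))	/* e.g. a frozen or read-only snapshot */
		return -EROFS;
	return generic_update_time(inode, time, flags);
}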
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;
	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);
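/*
 * Sketch: the conventional start of a filesystem write path: strip
 * privileges with file_remove_suid() above, then refresh the
 * timestamps via file_update_time() below. foo_prepare_write() is
 * hypothetical.
 */
static int __maybe_unused foo_prepare_write(struct file *file)
{
	int err = file_remove_suid(file);	/* kill setuid/setgid bits */

	if (err)
		return err;
	return file_update_time(file);		/* mtime/ctime/i_version */
}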
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.  This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
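/*
 * Sketch: a write path honouring O_SYNC/dirsync semantics via
 * inode_needs_sync() above. foo_commit_write() is hypothetical; the
 * helper only answers "must this inode be written synchronously?".
 */
static int __maybe_unused foo_commit_write(struct inode *inode)
{
	mark_inode_dirty(inode);
	if (inode_needs_sync(inode))
		return write_inode_now(inode, 1);	/* write and wait now */
	return 0;
}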
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);

	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
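/*
 * Sketch: a typical ->mknod() tail combining the two helpers above:
 * inode_init_owner() applies the POSIX uid/gid/setgid-directory rules,
 * and init_special_inode() wires up the mode-specific file operations.
 * Everything foo_* is hypothetical.
 */
static int __maybe_unused foo_mknod(struct inode *dir, struct dentry *dentry,
				    umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;
	inode->i_ino = get_next_ino();		/* pseudo-fs style numbering */
	inode_init_owner(inode, dir, mode);	/* uid/gid/mode policy */
	init_special_inode(inode, inode->i_mode, rdev);
	d_instantiate(dentry, inode);
	return 0;
}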
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule, is that there is at least one
 * code path which doesn't today --- for example,
 * __generic_file_aio_write() calls file_remove_suid() without holding
 * i_mutex --- so we use cmpxchg() out of an abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
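/*
 * Sketch: how a filesystem propagates on-disk flags into i_flags with
 * the helper above, touching only the bits in its mask (modelled on
 * the ext4-style pattern). foo_set_inode_flags() and FOO_IMMUTABLE_FL
 * are hypothetical.
 */
#define FOO_IMMUTABLE_FL	0x00000010	/* hypothetical on-disk bit */

static void __maybe_unused foo_set_inode_flags(struct inode *inode,
					       unsigned int disk_flags)
{
	unsigned int new_fl = 0;

	if (disk_flags & FOO_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	/* atomically update only the bits this fs owns */
	inode_set_flags(inode, new_fl, S_IMMUTABLE);
}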