/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
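/*
 * Illustrative sketch (not part of this file): code that takes several of
 * these locks must nest them in the order above.  A walker of the per-sb
 * inode list that also needs to look at i_state would do roughly:
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		... inspect inode->i_state ...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 */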
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
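/*
 * Illustrative sketch (hypothetical "foofs", not part of this file): a
 * filesystem that embeds struct inode in its own inode type provides
 * ->alloc_inode so that alloc_inode() below still does the common setup
 * via inode_init_always():
 *
 *	struct foofs_inode {
 *		u32 foo_private_state;
 *		struct inode vfs_inode;
 *	};
 *
 *	static struct inode *foofs_alloc_inode(struct super_block *sb)
 *	{
 *		struct foofs_inode *fi;
 *
 *		fi = kmem_cache_alloc(foofs_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 *
 * The matching ->destroy_inode frees the containing foofs_inode, usually
 * via call_rcu(), mirroring i_callback() below.
 */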
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
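/*
 * Illustrative sketch (hypothetical "foofs"): filesystems are expected to
 * go through these helpers rather than poking i_nlink directly, e.g. in an
 * unlink method:
 *
 *	static int foofs_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		... remove the directory entry on disk ...
 *		drop_nlink(inode);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */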
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
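/*
 * Illustrative sketch: a filesystem whose inodes are not uniquely named by
 * i_ino alone may hash on its own key.  Assuming a hypothetical 64-bit
 * object id kept in the fs-private part of the inode:
 *
 *	__insert_inode_hash(inode, (unsigned long)foofs_objectid(inode));
 *
 * The same hashval must then be used with iget5_locked()/ilookup5() when
 * looking the inode up again.
 */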
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the inode hash for this superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrexceptional);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
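/*
 * Illustrative sketch (hypothetical "foofs"): a filesystem implementing
 * ->evict_inode takes over the default path above, so it must end up
 * truncating the page cache and calling clear_inode() itself:
 *
 *	static void foofs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink && !is_bad_inode(inode))
 *			... free the on-disk inode ...
 *		clear_inode(inode);
 *	}
 */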
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}
/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode will not be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}
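/*
 * Illustrative sketch: pseudo filesystems with no stable on-disk inode
 * numbers typically pair get_next_ino() with new_inode() (defined below),
 * roughly:
 *
 *	inode = new_inode(sb);
 *	if (inode) {
 *		inode->i_ino = get_next_ino();
 *		inode->i_atime = inode->i_mtime = inode->i_ctime =
 *			current_fs_time(sb);
 *	}
 */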
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			/* mutex_destroy(&inode->i_mutex); */
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_lock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_unlock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
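/*
 * Illustrative sketch: a cross-inode operation on two regular files (say a
 * clone- or dedupe-style helper) would take and release both locks as:
 *
 *	lock_two_nondirectories(src, dst);
 *	... operate on both inodes ...
 *	unlock_two_nondirectories(src, dst);
 *
 * The helpers order by inode address, so callers need not sort src and dst
 * themselves, and passing src == dst locks the inode only once.
 */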
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
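/*
 * Illustrative sketch (hypothetical "foofs" keyed on a 64-bit object id;
 * FOOFS_I() is the usual container_of wrapper):
 *
 *	static int foofs_test(struct inode *inode, void *data)
 *	{
 *		return FOOFS_I(inode)->objectid == *(u64 *)data;
 *	}
 *
 *	static int foofs_set(struct inode *inode, void *data)
 *	{
 *		FOOFS_I(inode)->objectid = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, (unsigned long)objectid,
 *			     foofs_test, foofs_set, &objectid);
 *
 * Neither callback may sleep: both run under inode_hash_lock.
 */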
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
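/*
 * Illustrative sketch (hypothetical "foofs"; foofs_read_inode_from_disk()
 * is an assumed helper): the usual caller pattern is
 *
 *	struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
 *	{
 *		struct inode *inode = iget_locked(sb, ino);
 *
 *		if (!inode)
 *			return ERR_PTR(-ENOMEM);
 *		if (!(inode->i_state & I_NEW))
 *			return inode;		(cached and initialised)
 *
 *		if (foofs_read_inode_from_disk(inode)) {
 *			iget_failed(inode);	(unhashes, drops I_NEW)
 *			return ERR_PTR(-EIO);
 *		}
 *		unlock_new_inode(inode);
 *		return inode;
 *	}
 */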
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);
/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);
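/*
 * Illustrative sketch (hypothetical "foofs"): a create path that allocates
 * the on-disk inode number first typically does
 *
 *	inode = new_inode(sb);
 *	...
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		... somebody else owns this number; give up the inode ...
 *	}
 *	... initialise the rest, write it out ...
 *	unlock_new_inode(inode);
 */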
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput	- put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap	- find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
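/*
 * Illustrative example: the FIBMAP ioctl path resolves a file-relative
 * block to a device-relative block with this helper, treating 0 as "no
 * mapping" (a hole, or a filesystem without ->bmap support):
 *
 *	sector_t phys = bmap(inode, block);
 */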
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
		generic_update_time;

	return update_time(inode, time, flags);
}
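/*
 * Illustrative sketch (hypothetical "foofs"): a filesystem that wants to
 * veto or augment timestamp updates supplies ->update_time and can fall
 * back to the generic helper; foofs_is_readonly_snapshot() is an assumed
 * predicate:
 *
 *	static int foofs_update_time(struct inode *inode,
 *				     struct timespec *ts, int flags)
 *	{
 *		if (foofs_is_readonly_snapshot(inode))
 *			return -EROFS;
 *		return generic_update_time(inode, ts, flags);
 *	}
 */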
/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;
	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}
EXPORT_SYMBOL(dentry_needs_remove_privs);

static int __remove_privs(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	int kill;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill = file_needs_remove_privs(file);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * file_update_time	-	update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.  This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);
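/*
 * Illustrative sketch: write paths are expected to call this before
 * modifying data.  For example, a ->page_mkwrite handler usually starts
 * with something like
 *
 *	sb_start_pagefault(inode->i_sb);
 *	file_update_time(vma->vm_file);
 *
 * and a plain write path pairs file_remove_privs() with
 * file_update_time() before copying in data.
 */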
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
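/*
 * Illustrative sketch (hypothetical "foofs"): a ->mknod implementation
 * hands device and FIFO/socket inodes to this helper so they pick up the
 * right file operations, using inode_init_owner() (defined below) for
 * ownership:
 *
 *	static int foofs_mknod(struct inode *dir, struct dentry *dentry,
 *			       umode_t mode, dev_t rdev)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(inode, dir, mode);
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */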
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least
 * one code path which doesn't today, so we use cmpxchg() out of an
 * abundance of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);
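/*
 * Illustrative sketch: a truncate path serialises against in-flight direct
 * I/O with inode_dio_wait() roughly like this, holding i_rwsem as required
 * by the comment above:
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	... shrink i_size and truncate the page cache ...
 *	inode_unlock(inode);
 */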