/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
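/*
 * Illustrative sketch (not part of the original file): taking the locks
 * in the order documented above. Code that needs both the per-sb inode
 * list lock and i_lock must take s_inode_list_lock first:
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	spin_lock(&inode->i_lock);
 *	...
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&sb->s_inode_list_lock);
 *
 * Taking them in the opposite order would invert the documented ordering
 * and can deadlock against e.g. evict_inodes().
 */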
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);
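/*
 * Illustrative sketch (not from this file): a filesystem that embeds the
 * VFS inode in its own per-inode structure provides ->alloc_inode(),
 * which alloc_inode() above calls before running inode_init_always().
 * The foo_* names are hypothetical:
 *
 *	struct foo_inode_info {
 *		unsigned long	private_state;
 *		struct inode	vfs_inode;
 *	};
 *
 *	static struct inode *foo_alloc_inode(struct super_block *sb)
 *	{
 *		struct foo_inode_info *fi;
 *
 *		fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		fi->private_state = 0;
 *		return &fi->vfs_inode;
 *	}
 *
 * The VFS then initialises the returned inode with inode_init_always().
 */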
void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);
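/*
 * Illustrative sketch (not from this file): how a filesystem's unlink
 * path would use the nlink helpers above instead of touching i_nlink
 * directly. foo_unlink() is a hypothetical ->unlink() method:
 *
 *	static int foo_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		...remove the directory entry on disk...
 *		inode->i_ctime = dir->i_ctime;
 *		drop_nlink(inode);	// may take i_nlink to zero
 *		return 0;
 *	}
 *
 * Going through drop_nlink()/inc_nlink() keeps s_remove_count accurate,
 * which __destroy_inode() above sanity-checks.
 */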
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
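/*
 * Illustrative sketch (not from this file): most filesystems hash by
 * inode number via the insert_inode_hash() wrapper from <linux/fs.h>,
 * which is equivalent to:
 *
 *	__insert_inode_hash(inode, inode->i_ino);
 *
 * A filesystem whose identity needs more than i_ino (say, a 64-bit
 * object id on a 32-bit ino_t) passes its own hash value instead:
 *
 *	__insert_inode_hash(inode, foo_object_id(inode));  // foo_* hypothetical
 */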
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrexceptional);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for the flusher thread to be done with the inode so that the
	 * filesystem does not start destroying it while writeback is still
	 * running. Since the inode has I_FREEING set, the flusher thread
	 * won't start new work on the inode. We just have to wait for
	 * running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
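/*
 * Illustrative sketch (not from this file): a filesystem that implements
 * ->evict_inode() must mirror the default path above, i.e. truncate the
 * page cache and end with clear_inode(). foo_evict_inode() is hypothetical:
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		if (!inode->i_nlink && !is_bad_inode(inode))
 *			...free the on-disk inode and its blocks...
 *		clear_inode(inode);
 *	}
 *
 * Skipping clear_inode() would trip the I_FREEING | I_CLEAR BUG_ON in
 * evict().
 */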
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having the MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory; check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed.
 * This is necessary because we do lazy LRU updates to minimise lock
 * contention, so the LRU does not have strict ordering. Hence we don't want
 * to reclaim inodes with this flag set, because they are the inodes that are
 * out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode *inode = container_of(item, struct inode, i_lru);

	/*
	 * We are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}
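/*
 * Illustrative sketch (not from this file): the usual pairing of
 * new_inode_pseudo() with get_next_ino() in pseudo filesystems (pipes,
 * sockets and the like), where the inode never exists on disk:
 *
 *	struct inode *inode = new_inode_pseudo(sb);
 *
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	inode->i_ino = get_next_ino();
 *	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
 *	inode->i_atime = inode->i_mtime = inode->i_ctime =
 *					current_fs_time(sb);
 *
 * Per the batching comment above, get_next_ino() only provides
 * statistically unique numbers (the 32-bit counter can wrap), which is
 * acceptable for such filesystems.
 */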
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_lock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_unlock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
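/*
 * Illustrative sketch (not from this file): callers that operate on a
 * pair of regular files, e.g. a cross-file data exchange, use the pair
 * of helpers above so the address-based ordering prevents ABBA deadlocks:
 *
 *	lock_two_nondirectories(src, dst);
 *	...move or swap blocks between src and dst...
 *	unlock_two_nondirectories(src, dst);
 */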
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
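/*
 * Illustrative sketch (not from this file): a filesystem whose on-disk
 * identity is wider than i_ino supplies @test/@set callbacks. The foo_*
 * names are hypothetical:
 *
 *	static int foo_test(struct inode *inode, void *data)
 *	{
 *		return FOO_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int foo_set(struct inode *inode, void *data)
 *	{
 *		FOO_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(object_id, 32), foo_test,
 *			     foo_set, &object_id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		...read the on-disk inode...
 *		unlock_new_inode(inode);
 *	}
 *
 * Neither callback may sleep: both run under inode_hash_lock.
 */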
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
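/*
 * Illustrative sketch (not from this file): the canonical iget_locked()
 * pattern in a filesystem's inode lookup routine (foo_iget is hypothetical):
 *
 *	struct inode *foo_iget(struct super_block *sb, unsigned long ino)
 *	{
 *		struct inode *inode;
 *
 *		inode = iget_locked(sb, ino);
 *		if (!inode)
 *			return ERR_PTR(-ENOMEM);
 *		if (!(inode->i_state & I_NEW))
 *			return inode;	// cached, already filled in
 *
 *		...read the on-disk inode, fill in i_mode, i_op, etc...
 *		unlock_new_inode(inode);
 *		return inode;
 *	}
 *
 * Concurrent lookups of the same ino block in wait_on_inode() until the
 * first caller clears I_NEW via unlock_new_inode().
 */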
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @match:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() for when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
				unsigned long hashval,
				int (*match)(struct inode *, unsigned long,
					     void *),
				void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode, *ret_inode = NULL;
	int mval;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		mval = match(inode, hashval, data);
		if (mval == 0)
			continue;
		if (mval == 1)
			ret_inode = inode;
		goto out;
	}
out:
	spin_unlock(&inode_hash_lock);
	return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!old)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);
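/*
 * Illustrative sketch (not from this file): a filesystem create path
 * allocates an inode, picks an inode number, and hashes it with
 * insert_inode_locked(), bailing out on -EBUSY if another inode with
 * the same number is already hashed (foo_* names are hypothetical):
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_ino = foo_allocate_ino(sb);
 *	err = insert_inode_locked(inode);
 *	if (err) {
 *		make_bad_inode(inode);
 *		iput(inode);
 *		return err;
 *	}
 *	...initialise and write the inode, then unlock_new_inode(inode)...
 */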
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict inode, do so. Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (!inode)
		return;
	BUG_ON(inode->i_state & I_CLEAR);
retry:
	if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
		if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
			atomic_inc(&inode->i_count);
			inode->i_state &= ~I_DIRTY_TIME;
			spin_unlock(&inode->i_lock);
			trace_writeback_lazytime_iput(inode);
			mark_inode_dirty_sync(inode);
			goto retry;
		}
		iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
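/*
 * Illustrative sketch (not from this file): bmap() backs block-mapping
 * users such as the FIBMAP ioctl. Asking where file block 4 of an inode
 * lives on the device:
 *
 *	sector_t phys = bmap(inode, 4);
 *
 *	if (!phys)
 *		...block 4 is a hole, or the fs has no ->bmap()...
 *
 * Note a return of 0 is ambiguous between "no mapping here" and
 * "operation unsupported".
 */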
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
				struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
	int iflags = I_DIRTY_TIME;

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
		iflags |= I_DIRTY_SYNC;
	__mark_inode_dirty(inode, iflags);
	return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	int (*update_time)(struct inode *, struct timespec *, int);

	update_time = inode->i_op->update_time ? inode->i_op->update_time :
						 generic_update_time;

	return update_time(inode, time, flags);
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
	struct vfsmount *mnt = path->mnt;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return false;

	/* Atime updates will likely cause i_uid and i_gid to be written
	 * back improperly if their true value is unknown to the vfs.
	 */
	if (HAS_UNMAPPED_ID(inode))
		return false;

	if (IS_NOATIME(inode))
		return false;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	if (mnt->mnt_flags & MNT_NOATIME)
		return false;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return false;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return false;

	if (timespec_equal(&inode->i_atime, &now))
		return false;

	return true;
}

void touch_atime(const struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = d_inode(path->dentry);
	struct timespec now;

	if (!atime_needs_update(path, inode))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt) != 0)
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	now = current_fs_time(inode->i_sb);
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = d_inode(dentry)->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int mask = 0;
	int ret;

	if (IS_NOSEC(inode))
		return 0;

	mask = should_remove_suid(dentry);
	ret = security_inode_need_killpriv(dentry);
	if (ret < 0)
		return ret;
	if (ret)
		mask |= ATTR_KILL_PRIV;
	return mask;
}

static int __remove_privs(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	/*
	 * Note we call this on write, so notify_change will not
	 * encounter any conflicting delegations:
	 */
	return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = file_inode(file);
	int kill;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	kill = dentry_needs_remove_privs(dentry);
	if (kill < 0)
		return kill;
	if (kill)
		error = __remove_privs(dentry, kill);
	if (!error)
		inode_has_no_xattr(inode);

	return error;
}
EXPORT_SYMBOL(file_remove_privs);
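/*
 * Illustrative sketch (not from this file): a filesystem's ->write_iter()
 * typically drops privileges and bumps the timestamps before copying any
 * data, roughly as the generic write path does:
 *
 *	inode_lock(inode);
 *	ret = file_remove_privs(file);
 *	if (!ret)
 *		ret = file_update_time(file);
 *	if (!ret)
 *		...perform the actual write...
 *	inode_unlock(inode);
 *
 * file_update_time() is declared just below.
 */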
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback. Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function, using the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server. This can return an error for
 * file systems that need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
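/*
 * Illustrative note (not from this file): the inode hash table size can
 * be overridden on the kernel command line, e.g. booting with
 *
 *	ihash_entries=131072
 *
 * requests that many hash heads instead of the size that
 * alloc_large_system_hash() below would compute from available memory.
 */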
/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &pipefifo_fops;
	else if (S_ISSOCK(mode))
		;	/* leave it no_open_fops */
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
		      umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
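/*
 * Illustrative sketch (not from this file): a filesystem's ->mknod()
 * combines inode_init_owner() and init_special_inode() when creating a
 * device node (foo_mknod is hypothetical):
 *
 *	static int foo_mknod(struct inode *dir, struct dentry *dentry,
 *			     umode_t mode, dev_t rdev)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(inode, dir, mode);  // uid/gid/mode, sgid rules
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */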
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns;

	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;

	ns = current_user_ns();
	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today, so we use cmpxchg() out of an abundance
 * of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
		     unsigned int mask)
{
	unsigned int old_flags, new_flags;

	WARN_ON_ONCE(flags & ~mask);
	do {
		old_flags = ACCESS_ONCE(inode->i_flags);
		new_flags = (old_flags & ~mask) | flags;
	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
				  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);
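/*
 * Illustrative sketch (not from this file): updating a group of flags
 * atomically, as a filesystem might do when applying on-disk attribute
 * bits to the in-core inode. Only the bits in the mask are affected:
 *
 *	unsigned int new_fl = 0;
 *
 *	if (foo_on_disk_immutable(inode))	// foo_* hypothetical
 *		new_fl |= S_IMMUTABLE;
 *	if (foo_on_disk_noatime(inode))
 *		new_fl |= S_NOATIME;
 *	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_NOATIME);
 *
 * Bits outside the mask (here e.g. S_SYNC) are left untouched, and the
 * cmpxchg() loop makes the read-modify-write safe against a racing
 * updater.
 */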