/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * inode_wb_list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * inode_wb_list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);
static DEFINE_PER_CPU(unsigned int, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
        int i;
        int sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
        int i;
        int sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

int get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}
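/*
 * Note on the clamping above: nr_inodes and nr_unused are plain per-cpu
 * counters, and an inode allocated on one CPU may be freed on another, so
 * both an individual per-cpu slot and an unsynchronised sum taken while
 * the counters move can be transiently negative. Negative totals are
 * therefore clamped to zero before being reported.
 */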
/*
 * Handle nr_inodes sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations empty_fops;
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &empty_fops;
        inode->i_nlink = 1;
        inode->i_uid = 0;
        inode->i_gid = 0;
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
#ifdef CONFIG_QUOTA
        memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        mutex_init(&inode->i_mutex);
        lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->assoc_mapping = NULL;
        mapping->backing_dev_info = &default_backing_dev_info;
        mapping->writeback_index = 0;

        /*
         * If the block_device provides a backing_dev_info for client
         * inodes then use that. Otherwise the inode shares the bdev's
         * backing_dev_info.
         */
        if (sb->s_bdev) {
                struct backing_dev_info *bdi;

                bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
                mapping->backing_dev_info = bdi;
        }
        inode->i_private = NULL;
        inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif

        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
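/*
 * Illustrative sketch (hypothetical "foofs", not part of this file): a
 * filesystem typically embeds the VFS inode inside its own inode structure
 * and hands the embedded part back from ->alloc_inode(); alloc_inode()
 * below then runs inode_init_always() on it.
 *
 *      struct foo_inode_info {
 *              unsigned long   i_flags;        (fs-private state)
 *              struct inode    vfs_inode;
 *      };
 *
 *      static struct inode *foo_alloc_inode(struct super_block *sb)
 *      {
 *              struct foo_inode_info *fi;
 *
 *              fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
 *              if (!fi)
 *                      return NULL;
 *              return &fi->vfs_inode;
 *      }
 *
 * foo_inode_cachep is a made-up slab cache created by the filesystem at
 * module init with inode_init_once() run on each object.
 */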
static struct inode *alloc_inode(struct super_block *sb)
{
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (inode->i_sb->s_op->destroy_inode)
                        inode->i_sb->s_op->destroy_inode(inode);
                else
                        kmem_cache_free(inode_cachep, inode);
                return NULL;
        }

        return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                call_rcu(&inode->i_rcu, i_callback);
}
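/*
 * Note: freeing through call_rcu() rather than immediately means the
 * struct inode stays valid for a full RCU grace period after the last
 * reference is gone; this is what allows lock-free (RCU-walk) path lookup
 * to dereference ->d_inode without holding a reference.
 */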
void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
        spin_lock_init(&mapping->tree_lock);
        mutex_init(&mapping->i_mmap_mutex);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
        INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache know about it.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_dentry);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
        address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_lru_lock);
        if (list_empty(&inode->i_lru)) {
                list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
                inode->i_sb->s_nr_inodes_unused++;
                this_cpu_inc(nr_unused);
        }
        spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_lru_lock);
        if (!list_empty(&inode->i_lru)) {
                list_del_init(&inode->i_lru);
                inode->i_sb->s_nr_inodes_unused--;
                this_cpu_dec(nr_unused);
        }
        spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode_sb_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        spin_lock(&inode_sb_list_lock);
        list_del_init(&inode->i_sb_list);
        spin_unlock(&inode_sb_list_lock);
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}
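/*
 * hash() mixes the superblock pointer into the key so that equal inode
 * numbers from different filesystems land in different buckets; the
 * GOLDEN_RATIO_PRIME multiply/xor steps spread the bits before the value
 * is folded into the table with i_hash_shift/i_hash_mask, which are sized
 * at boot by inode_init_early()/inode_init().
 */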
/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *           inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the inode hash table.
 */
void remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);

void end_writeback(struct inode *inode)
{
        might_sleep();
        /*
         * We have to cycle tree_lock here because reclaim can still be in
         * the process of removing the last page (in
         * __delete_from_page_cache()) and we must not free the mapping
         * under it.
         */
        spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
        spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        inode_sync_wait(inode);
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);
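/*
 * Illustrative sketch (hypothetical "foofs"): a filesystem's
 * ->evict_inode() is expected to tear the inode down and finish with
 * end_writeback(), e.g.:
 *
 *      static void foo_evict_inode(struct inode *inode)
 *      {
 *              truncate_inode_pages(&inode->i_data, 0);
 *              if (!inode->i_nlink)
 *                      foo_free_on_disk_inode(inode);
 *              end_writeback(inode);
 *      }
 *
 * foo_free_on_disk_inode() is a made-up helper standing in for the
 * filesystem's own on-disk teardown.
 */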
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        inode_wb_list_del(inode);
        inode_sb_list_del(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                end_writeback(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
        }
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after the MS_ACTIVE flag has been
 * removed, so any inode reaching zero refcount during or after that
 * call will be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&inode_sb_list_lock);

        dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&inode_sb_list_lock);

        dispose_list(&dispose);

        return busy;
}
static int can_unuse(struct inode *inode)
{
        if (inode->i_state & ~I_REFERENCED)
                return 0;
        if (inode_has_buffers(inode))
                return 0;
        if (atomic_read(&inode->i_count))
                return 0;
        if (inode->i_data.nrpages)
                return 0;
        return 1;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
        LIST_HEAD(freeable);
        int nr_scanned;
        unsigned long reap = 0;

        spin_lock(&sb->s_inode_lru_lock);
        for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
                struct inode *inode;

                if (list_empty(&sb->s_inode_lru))
                        break;

                inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

                /*
                 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
                 * so use a trylock. If we fail to get the lock, just move the
                 * inode to the back of the list so we don't spin on it.
                 */
                if (!spin_trylock(&inode->i_lock)) {
                        list_move(&inode->i_lru, &sb->s_inode_lru);
                        continue;
                }

                /*
                 * Referenced or dirty inodes are still in use. Give them
                 * another pass through the LRU as we cannot reclaim them now.
                 */
                if (atomic_read(&inode->i_count) ||
                    (inode->i_state & ~I_REFERENCED)) {
                        list_del_init(&inode->i_lru);
                        spin_unlock(&inode->i_lock);
                        sb->s_nr_inodes_unused--;
                        this_cpu_dec(nr_unused);
                        continue;
                }

                /* recently referenced inodes get one more pass */
                if (inode->i_state & I_REFERENCED) {
                        inode->i_state &= ~I_REFERENCED;
                        list_move(&inode->i_lru, &sb->s_inode_lru);
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                        __iget(inode);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&sb->s_inode_lru_lock);
                        if (remove_inode_buffers(inode))
                                reap += invalidate_mapping_pages(&inode->i_data,
                                                                 0, -1);
                        iput(inode);
                        spin_lock(&sb->s_inode_lru_lock);

                        if (inode != list_entry(sb->s_inode_lru.next,
                                                struct inode, i_lru))
                                continue;       /* wrong inode or list_empty */
                        /* avoid lock inversions with trylock */
                        if (!spin_trylock(&inode->i_lock))
                                continue;
                        if (!can_unuse(inode)) {
                                spin_unlock(&inode->i_lock);
                                continue;
                        }
                }
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_FREEING;
                spin_unlock(&inode->i_lock);

                list_move(&inode->i_lru, &freeable);
                sb->s_nr_inodes_unused--;
                this_cpu_dec(nr_unused);
        }
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_INODESTEAL, reap);
        else
                __count_vm_events(PGINODESTEAL, reap);
        spin_unlock(&sb->s_inode_lru_lock);

        dispose_list(&freeable);
}
static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct hlist_node *node;
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, node, head, i_hash) {
                spin_lock(&inode->i_lock);
                if (inode->i_sb != sb) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (!test(inode, data)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino)
{
        struct hlist_node *node;
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, node, head, i_hash) {
                spin_lock(&inode->i_lock);
                if (inode->i_ino != ino) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_sb != sb) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        *p = ++res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);
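/*
 * Usage note (sketch): get_next_ino() serves pseudo filesystems whose
 * inodes have no on-disk identity (e.g. pipes and sockets); after
 * allocating an inode a caller simply does
 *
 *      inode->i_ino = get_next_ino();
 *
 * The batching above keeps the shared counter off the allocation fast
 * path.
 */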
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&inode_sb_list_lock);

        inode = alloc_inode(sb);
        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                inode_sb_list_add(inode);
        }
        return inode;
}
EXPORT_SYMBOL(new_inode);

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (!lockdep_match_class(&inode->i_mutex,
                                         &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        mutex_destroy(&inode->i_mutex);
                        mutex_init(&inode->i_mutex);
                        lockdep_set_class(&inode->i_mutex,
                                          &type->i_mutex_dir_key);
                }
        }
#endif
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *),
                int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;

set_failed:
        spin_unlock(&inode_hash_lock);
        destroy_inode(inode);
        return NULL;
}
EXPORT_SYMBOL(iget5_locked);
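/*
 * Illustrative sketch (hypothetical callbacks, not from this file): a
 * filesystem whose on-disk identity is wider than an inode number passes
 * @test/@set pairs like these, with @data pointing at the identity:
 *
 *      static int foo_test(struct inode *inode, void *data)
 *      {
 *              return FOO_I(inode)->i_key == *(u64 *)data;
 *      }
 *
 *      static int foo_set(struct inode *inode, void *data)
 *      {
 *              FOO_I(inode)->i_key = *(u64 *)data;
 *              return 0;
 *      }
 *
 * Both run under inode_hash_lock, so they must not sleep or take sleeping
 * locks. FOO_I() is a made-up container_of()-style accessor.
 */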
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                wait_on_inode(inode);
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);
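/*
 * Illustrative sketch (hypothetical "foofs"): the canonical read-inode
 * pattern built on iget_locked()/unlock_new_inode():
 *
 *      struct inode *foo_iget(struct super_block *sb, unsigned long ino)
 *      {
 *              struct inode *inode;
 *
 *              inode = iget_locked(sb, ino);
 *              if (!inode)
 *                      return ERR_PTR(-ENOMEM);
 *              if (!(inode->i_state & I_NEW))
 *                      return inode;   (cache hit, already initialised)
 *
 *              ... read the on-disk inode and fill in the fields ...
 *
 *              unlock_new_inode(inode);
 *              return inode;
 *      }
 */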
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct hlist_node *node;
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, node, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return it with an incremented reference
 * count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

        if (inode)
                wait_on_inode(inode);
        return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode)
                wait_on_inode(inode);
        return inode;
}
EXPORT_SYMBOL(ilookup);
int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct hlist_node *node;
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, node, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!node)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        while (1) {
                struct hlist_node *node;
                struct inode *old = NULL;

                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, node, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!node)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked4);
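/*
 * Usage note (sketch): insert_inode_locked() is the create-side counterpart
 * of iget_locked(). A filesystem allocating a brand-new inode typically sets
 * inode->i_ino and then does
 *
 *      err = insert_inode_locked(inode);
 *      if (err)
 *              ... drop the inode; -EBUSY means a live inode with the
 *              same number is already hashed ...
 *
 * and calls unlock_new_inode() once the inode is fully set up.
 */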
int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
int generic_drop_inode(struct inode *inode)
{
        return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict the inode, do so. Otherwise, retain the
 * inode in cache if the fs is alive, or sync and evict
 * it if the fs is shutting down.
 */
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & MS_ACTIVE)) {
                inode->i_state |= I_REFERENCED;
                if (!(inode->i_state & (I_DIRTY|I_SYNC)))
                        inode_lru_list_add(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (inode) {
                BUG_ON(inode->i_state & I_CLEAR);

                if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
                        iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1, the function will return the
 * disk block (relative to the start of the device) that holds that
 * block of the file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
                struct timespec now)
{

        if (!(mnt->mnt_flags & MNT_RELATIME))
                return 1;
        /*
         * Is mtime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
                return 1;
        /*
         * Is ctime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
                return 1;

        /*
         * Is the previous atime value older than a day? If yes,
         * update atime:
         */
        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
                return 1;
        /*
         * Good, we can skip the atime update:
         */
        return 0;
}
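/*
 * Worked example for the relatime rules above: a file last written at
 * 09:00 (mtime) and last read at 10:00 (atime) is read again at 11:00.
 * mtime and ctime are older than atime and atime is less than a day old,
 * so the atime update is skipped. If the file is then modified at 11:30
 * and read at 11:31, mtime is now newer than atime, so atime is updated
 * again.
 */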
/**
 * touch_atime - update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;
        struct timespec now;

        if (inode->i_flags & S_NOATIME)
                return;
        if (IS_NOATIME(inode))
                return;
        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
                return;

        if (mnt->mnt_flags & MNT_NOATIME)
                return;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return;

        now = current_fs_time(inode->i_sb);

        if (!relatime_need_update(mnt, inode, now))
                return;

        if (timespec_equal(&inode->i_atime, &now))
                return;

        if (mnt_want_write(mnt))
                return;

        inode->i_atime = now;
        mark_inode_dirty_sync(inode);
        mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback. Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function, using the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.
 */
void file_update_time(struct file *file)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct timespec now;
        enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
                return;

        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = S_MTIME;

        if (!timespec_equal(&inode->i_ctime, &now))
                sync_it |= S_CTIME;

        if (IS_I_VERSION(inode))
                sync_it |= S_VERSION;

        if (!sync_it)
                return;

        /* Finally allowed to write? Takes lock. */
        if (mnt_want_write_file(file))
                return;

        /* Only change inode inside the lock region */
        if (sync_it & S_VERSION)
                inode_inc_iversion(inode);
        if (sync_it & S_CTIME)
                inode->i_ctime = now;
        if (sync_it & S_MTIME)
                inode->i_mtime = now;
        mark_inode_dirty_sync(inode);
        mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);
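/*
 * Usage note: file_update_time() is invoked from the generic write path
 * (e.g. __generic_file_aio_write() before copying data) and from the
 * shared-writable page fault path, so filesystems using the generic
 * helpers get these timestamp updates without calling it directly.
 */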
int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
        schedule();
        return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq, &wait.wait);
        spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the inode hash table.
 */
void __init inode_init_early(void)
{
        int loop;

        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_EARLY,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
        int loop;

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache",
                                         sizeof(struct inode),
                                         0,
                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                          SLAB_MEM_SPREAD),
                                         init_once);

        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        0,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
        inode->i_mode = mode;
        if (S_ISCHR(mode)) {
                inode->i_fop = &def_chr_fops;
                inode->i_rdev = rdev;
        } else if (S_ISBLK(mode)) {
                inode->i_fop = &def_blk_fops;
                inode->i_rdev = rdev;
        } else if (S_ISFIFO(mode))
                inode->i_fop = &def_fifo_fops;
        else if (S_ISSOCK(mode))
                inode->i_fop = &bad_sock_fops;
        else
                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
                                  " inode %s:%lu\n", mode, inode->i_sb->s_id,
                                  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to POSIX standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
                        mode_t mode)
{
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
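/*
 * Illustrative sketch (hypothetical "foofs"): inode_init_owner() is meant
 * to be called from a filesystem's create/mkdir/mknod paths once the new
 * inode exists and the parent directory is known:
 *
 *      struct inode *inode = new_inode(sb);
 *
 *      if (!inode)
 *              return -ENOMEM;
 *      inode_init_owner(inode, dir, mode);
 *      ... set i_op/i_fop, allocate blocks, hash the inode ...
 *
 * This centralises the setgid-directory rule: files created in a setgid
 * directory inherit the directory's group, and new subdirectories also
 * inherit the S_ISGID bit itself.
 */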
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in the inode's user
 * namespace, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
        struct user_namespace *ns = inode_userns(inode);

        if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
                return true;
        if (ns_capable(ns, CAP_FOWNER))
                return true;
        return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);