/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
#include <linux/mount.h>

/*
 * This is needed for the following functions:
 *  - inode_has_buffers
 *  - invalidate_inode_buffers
 *  - invalidate_bdev
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable __read_mostly;

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);

/*
 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path.  Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 */
static DEFINE_MUTEX(iprune_mutex);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static struct kmem_cache *inode_cachep __read_mostly;

static struct inode *alloc_inode(struct super_block *sb)
{
	static const struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (inode) {
		struct address_space *const mapping = &inode->i_data;

		inode->i_sb = sb;
		inode->i_blkbits = sb->s_blocksize_bits;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_op = &empty_iops;
		inode->i_fop = &empty_fops;
		inode->i_nlink = 1;
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_bytes = 0;
		inode->i_generation = 0;
#ifdef CONFIG_QUOTA
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;
		inode->i_rdev = 0;
		inode->dirtied_when = 0;
		if (security_inode_alloc(inode)) {
			if (inode->i_sb->s_op->destroy_inode)
				inode->i_sb->s_op->destroy_inode(inode);
			else
				kmem_cache_free(inode_cachep, inode);
			return NULL;
		}

		mapping->a_ops = &empty_aops;
		mapping->host = inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = &default_backing_dev_info;

		/*
		 * If the block_device provides a backing_dev_info for client
		 * inodes then use that.  Otherwise the inode shares the
		 * bdev's backing_dev_info.
		 */
		if (sb->s_bdev) {
			struct backing_dev_info *bdi;

			bdi = sb->s_bdev->bd_inode_backing_dev_info;
			if (!bdi)
				bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
			mapping->backing_dev_info = bdi;
		}
		inode->i_private = NULL;
		inode->i_mapping = mapping;
	}
	return inode;
}

void destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
}
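
/*
 * For reference, a filesystem that wants per-inode private data typically
 * embeds the VFS inode in its own structure and supplies the ->alloc_inode
 * and ->destroy_inode hooks used above.  A minimal sketch; the myfs_* names
 * and myfs_inode_cachep are hypothetical, not part of this file:
 *
 *	struct myfs_inode_info {
 *		unsigned long	i_flags;
 *		struct inode	vfs_inode;
 *	};
 *
 *	static struct inode *myfs_alloc_inode(struct super_block *sb)
 *	{
 *		struct myfs_inode_info *mi;
 *
 *		mi = kmem_cache_alloc(myfs_inode_cachep, GFP_KERNEL);
 *		if (!mi)
 *			return NULL;
 *		return &mi->vfs_inode;
 *	}
 *
 *	static void myfs_destroy_inode(struct inode *inode)
 *	{
 *		kmem_cache_free(myfs_inode_cachep,
 *			container_of(inode, struct myfs_inode_info, vfs_inode));
 *	}
 */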
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	mutex_init(&inode->i_mutex);
	init_rwsem(&inode->i_alloc_sem);
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	rwlock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	spin_lock_init(&inode->i_lock);
	i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
	INIT_LIST_HEAD(&inode->inotify_watches);
	mutex_init(&inode->inotify_mutex);
#endif
}

EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode_lock must be held
 */
void __iget(struct inode *inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_LOCK)))
		list_move(&inode->i_list, &inode_in_use);
	inodes_stat.nr_unused--;
}

/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	might_sleep();
	invalidate_inode_buffers(inode);

	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	wait_on_inode(inode);
	DQUOT_DROP(inode);
	if (inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);
	inode->i_state = I_CLEAR;
}

EXPORT_SYMBOL(clear_inode);
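
/*
 * The ->clear_inode hook called above is where a filesystem drops any
 * per-inode state it attached while the inode was live.  A minimal sketch,
 * with hypothetical myfs_* names (MYFS_I would be the usual container_of
 * wrapper around the embedded vfs_inode):
 *
 *	static void myfs_clear_inode(struct inode *inode)
 *	{
 *		kfree(MYFS_I(inode)->i_private_data);
 *		MYFS_I(inode)->i_private_data = NULL;
 *	}
 */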
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	int nr_disposed = 0;

	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_list);
		list_del(&inode->i_list);

		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);

		spin_lock(&inode_lock);
		hlist_del_init(&inode->i_hash);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_lock);

		wake_up_inode(inode);
		destroy_inode(inode);
		nr_disposed++;
	}
	spin_lock(&inode_lock);
	inodes_stat.nr_inodes -= nr_disposed;
	spin_unlock(&inode_lock);
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head *tmp = next;
		struct inode *inode;

		/*
		 * We can reschedule here without worrying about the list's
		 * consistency because the per-sb list of inodes must not
		 * change during umount anymore, and because iprune_mutex keeps
		 * shrink_icache_memory() away.
		 */
		cond_resched_lock(&inode_lock);

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_sb_list);
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}

/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non-zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block *sb)
{
	int busy;
	LIST_HEAD(throw_away);

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);
	mutex_unlock(&iprune_mutex);

	return busy;
}

EXPORT_SYMBOL(invalidate_inodes);

static int can_unuse(struct inode *inode)
{
	if (inode->i_state)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to
 * a temporary list and then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  We expect the final iput() on that inode to add it to
 * the front of the inode_unused list.  So look for it there and if the
 * inode is still freeable, proceed.  The right inode is found 99.9% of the
 * time in testing on a 4-way.
 *
 * If the inode has metadata buffers attached to mapping->private_list then
 * try to remove them.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_pruned = 0;
	int nr_scanned;
	unsigned long reap = 0;

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_unused))
			break;

		inode = list_entry(inode_unused.prev, struct inode, i_list);

		if (inode->i_state || atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, &inode_unused);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&inode_lock);

			if (inode != list_entry(inode_unused.next,
						struct inode, i_list))
				continue;	/* wrong inode or list_empty */
			if (!can_unuse(inode))
				continue;
		}
		list_move(&inode->i_list, &freeable);
		inode->i_state |= I_FREEING;
		nr_pruned++;
	}
	inodes_stat.nr_unused -= nr_pruned;
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
	mutex_unlock(&iprune_mutex);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode *find_inode(struct super_block *sb,
		struct hlist_head *head,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each(node, head) {
		inode = hlist_entry(node, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}
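
/*
 * The @test callback gives filesystems whose inode numbers are not unique
 * a way to identify an inode.  It runs under inode_lock, so it must not
 * sleep.  A minimal sketch, assuming a hypothetical on-disk generation
 * number is carried in @data (the myfs_* names are illustrative only):
 *
 *	struct myfs_iget_args {
 *		unsigned long	ino;
 *		__u32		generation;
 *	};
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		struct myfs_iget_args *args = data;
 *
 *		return inode->i_ino == args->ino &&
 *			inode->i_generation == args->generation;
 *	}
 */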
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each(node, head) {
		inode = hlist_entry(node, struct inode, i_hash);
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.  The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
 * If HIGHMEM pages are unsuitable, or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int last_ino;
	struct inode *inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		inodes_stat.nr_inodes++;
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_sb_list, &sb->s_inodes);
		inode->i_ino = ++last_ino;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}

EXPORT_SYMBOL(new_inode);

void unlock_new_inode(struct inode *inode)
{
	/*
	 * This is special!  We do not need the spinlock when clearing I_LOCK,
	 * because we're guaranteed that nobody else tries to do anything about
	 * the state of the inode when it is locked, as we just created it (so
	 * there can be no old holders that haven't tested I_LOCK).
	 */
	inode->i_state &= ~(I_LOCK|I_NEW);
	wake_up_inode(inode);
}

EXPORT_SYMBOL(unlock_new_inode);
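
/*
 * For reference, a simple in-memory filesystem creates inodes with
 * new_inode() and fills them in directly; no unlock_new_inode() is needed
 * because the inode was never hashed or marked I_NEW.  A minimal sketch
 * (myfs_get_inode is a hypothetical helper, not part of this file):
 *
 *	struct inode *myfs_get_inode(struct super_block *sb, int mode)
 *	{
 *		struct inode *inode = new_inode(sb);
 *
 *		if (inode) {
 *			inode->i_mode = mode;
 *			inode->i_uid = current->fsuid;
 *			inode->i_gid = current->fsgid;
 *			inode->i_atime = inode->i_mtime = inode->i_ctime =
 *				CURRENT_TIME;
 *		}
 *		return inode;
 *	}
 */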
/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode *get_new_inode(struct super_block *sb,
		struct hlist_head *head,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_lock);
	destroy_inode(inode);
	return NULL;
}

/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 */
static struct inode *get_new_inode_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_sb_list, &sb->s_inodes);
			hlist_add_head(&inode->i_hash, head);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
	return tmp & I_HASHMASK;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int counter;
	struct inode *inode;
	struct hlist_head *head;
	ino_t res;

	spin_lock(&inode_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
		head = inode_hashtable + hash(sb, res);
		inode = find_inode_fast(sb, head, res);
	} while (inode != NULL);
	spin_unlock(&inode_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
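
/*
 * For reference, a filesystem with no stable on-disk inode numbers can
 * combine new_inode() with iunique() when creating an inode.  A minimal
 * sketch (MYFS_FIRST_INO is a hypothetical reserved-range bound):
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = iunique(sb, MYFS_FIRST_INO);
 */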
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	return inode;
}

EXPORT_SYMBOL(igrab);

/**
 * ifind - internal function, you want ilookup5() or iget5().
 * @sb: super block of file system to search
 * @head: the head of the list to search
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 * @wait: if true wait for the inode to be unlocked, if false do not
 *
 * ifind() searches for the inode specified by @data in the inode
 * cache. This is a generalized version of ifind_fast() for file systems where
 * the inode number is not sufficient for unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
static struct inode *ifind(struct super_block *sb,
		struct hlist_head *head, int (*test)(struct inode *, void *),
		void *data, const int wait)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, head, test, data);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		if (likely(wait))
			wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 * ifind_fast - internal function, you want ilookup() or iget().
 * @sb: super block of file system to search
 * @head: head of the list to search
 * @ino: inode number to search for
 *
 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 * file systems where the inode number is sufficient for unique identification
 * of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
static struct inode *ifind_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * ilookup5_nowait() uses ifind() to search for the inode specified by
 * @hashval and @data in the inode cache. This is a generalized version of
 * ilookup() for file systems where the inode number is not sufficient for
 * unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.  Note, the inode lock is not waited upon so you have to be
 * very careful what you do with the returned inode.  You probably should be
 * using ilookup5() instead.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 0);
}

EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode lock is waited upon and the inode is
 * returned with an incremented reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 1);
}

EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
 * This is for file systems where the inode number is sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	return ifind_fast(sb, head, ino);
}

EXPORT_SYMBOL(ilookup);
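
/*
 * For reference, ilookup() is useful when a caller only wants an inode if
 * it is already cached, without triggering any filesystem I/O to read it
 * in.  A minimal, hypothetical sketch (the returned inode carries a raised
 * reference count, so it must be balanced with iput()):
 *
 *	struct inode *inode = ilookup(sb, ino);
 *
 *	if (inode) {
 *		mark_inode_dirty(inode);
 *		iput(inode);
 *	}
 */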
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * This is iget() without the read_inode() portion of get_new_inode().
 *
 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache and if present it is returned with an increased
 * reference count. This is a generalized version of iget_locked() for file
 * systems where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, get_new_inode() is called to allocate a new
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_lock held, so can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	inode = ifind(sb, head, test, data, 1);
	if (inode)
		return inode;
	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, head, test, set, data);
}

EXPORT_SYMBOL(iget5_locked);

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * This is iget() without the read_inode() portion of get_new_inode_fast().
 *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 * The file system gets to fill it in before unlocking it via
 * unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	inode = ifind_fast(sb, head, ino);
	if (inode)
		return inode;
	/*
	 * get_new_inode_fast() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode_fast(sb, head, ino);
}

EXPORT_SYMBOL(iget_locked);
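
/*
 * For reference, the canonical caller pattern for iget_locked(): if the
 * returned inode has I_NEW set, it is a fresh, locked inode that the caller
 * must initialize and then publish with unlock_new_inode().  A minimal
 * sketch (myfs_read_inode is a hypothetical disk-read helper):
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *
 *	if (!inode)
 *		return -ENOMEM;
 *	if (inode->i_state & I_NEW) {
 *		myfs_read_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 */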
/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_lock);
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__insert_inode_hash);

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(remove_inode_hash);

/*
 * Tell the filesystem that this inode is no longer of any interest and should
 * be completely destroyed.
 *
 * We leave the inode in the inode hash table until *after* the filesystem's
 * ->delete_inode completes.  This ensures that an iget (such as nfsd might
 * instigate) will always find up-to-date information either in the hash or on
 * disk.
 *
 * I_FREEING is set so that no-one will take a new reference to the inode while
 * it is being deleted.
 */
void generic_delete_inode(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);

	security_inode_delete(inode);

	if (op->delete_inode) {
		void (*delete)(struct inode *) = op->delete_inode;
		if (!is_bad_inode(inode))
			DQUOT_INIT(inode);
		/*
		 * Filesystems implementing their own s_op->delete_inode are
		 * required to call truncate_inode_pages() and clear_inode()
		 * internally.
		 */
		delete(inode);
	} else {
		truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
	}
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
	wake_up_inode(inode);
	BUG_ON(inode->i_state != I_CLEAR);
	destroy_inode(inode);
}

EXPORT_SYMBOL(generic_delete_inode);

static void generic_forget_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!hlist_unhashed(&inode->i_hash)) {
		if (!(inode->i_state & (I_DIRTY|I_LOCK)))
			list_move(&inode->i_list, &inode_unused);
		inodes_stat.nr_unused++;
		if (sb->s_flags & MS_ACTIVE) {
			spin_unlock(&inode_lock);
			return;
		}
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode_lock);
		inode->i_state &= ~I_WILL_FREE;
		inodes_stat.nr_unused--;
		hlist_del_init(&inode->i_hash);
	}
	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);
	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	wake_up_inode(inode);
	destroy_inode(inode);
}

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
void generic_drop_inode(struct inode *inode)
{
	if (!inode->i_nlink)
		generic_delete_inode(inode);
	else
		generic_forget_inode(inode);
}

EXPORT_SYMBOL_GPL(generic_drop_inode);
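
/*
 * For reference, a filesystem can override ->drop_inode to change this
 * policy.  Pointing it at generic_delete_inode() makes every inode go
 * through the delete path on final iput(), so nothing lingers on the
 * unused list; sketched below with a hypothetical myfs_super_ops:
 *
 *	static const struct super_operations myfs_super_ops = {
 *		.drop_inode	= generic_delete_inode,
 *	};
 */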
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop()" function, defaulting to
 * the legacy UNIX filesystem behaviour..
 *
 * NOTE! NOTE! NOTE! We're called with the inode lock
 * held, and the drop function is supposed to release
 * the lock!
 */
static inline void iput_final(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;
	void (*drop)(struct inode *) = generic_drop_inode;

	if (op && op->drop_inode)
		drop = op->drop_inode;
	drop(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		const struct super_operations *op = inode->i_sb->s_op;

		BUG_ON(inode->i_state == I_CLEAR);

		if (op && op->put_inode)
			op->put_inode(inode);

		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
			iput_final(inode);
	}
}

EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
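
/*
 * For reference, bmap() returns 0 when the address_space has no ->bmap
 * method or the requested block is unmapped, so callers must treat 0 as
 * "no mapping".  A minimal, hypothetical use:
 *
 *	sector_t phys = bmap(inode, blocknr);
 *
 *	if (phys == 0)
 *		return -EINVAL;
 */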
/**
 * touch_atime - update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	/*
	 * We may have a NULL vfsmount when coming from NFSD
	 */
	if (mnt) {
		if (mnt->mnt_flags & MNT_NOATIME)
			return;
		if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
			return;

		if (mnt->mnt_flags & MNT_RELATIME) {
			/*
			 * With relative atime, only update atime if the
			 * previous atime is earlier than either the ctime or
			 * mtime.
			 */
			if (timespec_compare(&inode->i_mtime,
						&inode->i_atime) < 0 &&
			    timespec_compare(&inode->i_ctime,
						&inode->i_atime) < 0)
				return;
		}
	}

	now = current_fs_time(inode->i_sb);
	if (timespec_equal(&inode->i_atime, &now))
		return;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL(touch_atime);

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.
 */
void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int sync_it = 0;

	if (IS_NOCMTIME(inode))
		return;
	if (IS_RDONLY(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now)) {
		inode->i_mtime = now;
		sync_it = 1;
	}

	if (!timespec_equal(&inode->i_ctime, &now)) {
		inode->i_ctime = now;
		sync_it = 1;
	}

	if (sync_it)
		mark_inode_dirty_sync(inode);
}

EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}

EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_LOCK is not set initially, a call to
 * wake_up_inode() after removing from the hash list will DTRT.
 *
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);

	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_lock);
}

void wake_up_inode(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LOCK);
}

/*
 * We rarely want to lock two inodes that do not have a parent/child
 * relationship (such as directory, child inode) simultaneously.  The
 * vast majority of file systems should be able to get along fine
 * without this.  Do not use these functions except as a last resort.
 */
void inode_double_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
		if (inode1)
			mutex_lock(&inode1->i_mutex);
		else if (inode2)
			mutex_lock(&inode2->i_mutex);
		return;
	}

	if (inode1 < inode2) {
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
	} else {
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
	}
}
EXPORT_SYMBOL(inode_double_lock);

void inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
	if (inode1)
		mutex_unlock(&inode1->i_mutex);

	if (inode2 && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(inode_double_unlock);

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(unsigned long mempages)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);
	register_shrinker(&icache_shrinker);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
		       mode);
}
EXPORT_SYMBOL(init_special_inode);
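
/*
 * For reference, a filesystem's ->mknod typically allocates an inode and
 * hands device nodes, FIFOs and sockets to init_special_inode().  A minimal
 * sketch (myfs_mknod and myfs_get_inode are hypothetical helpers):
 *
 *	static int myfs_mknod(struct inode *dir, struct dentry *dentry,
 *			      int mode, dev_t dev)
 *	{
 *		struct inode *inode = myfs_get_inode(dir->i_sb, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		if (S_ISCHR(mode) || S_ISBLK(mode) ||
 *		    S_ISFIFO(mode) || S_ISSOCK(mode))
 *			init_special_inode(inode, mode, dev);
 *		d_instantiate(dentry, inode);
 *		dget(dentry);
 *		return 0;
 *	}
 */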