/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
        return -ENXIO;
}
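/*
 * Illustrative sketch (not compiled here): the lock ordering documented
 * above means code walking the per-sb inode list takes s_inode_list_lock
 * first and may then nest inode->i_lock inside it, never the reverse.
 * evict_inodes() below follows exactly this pattern:
 *
 *      spin_lock(&sb->s_inode_list_lock);
 *      list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *              spin_lock(&inode->i_lock);
 *              // ... inspect or modify inode->i_state ...
 *              spin_unlock(&inode->i_lock);
 *      }
 *      spin_unlock(&sb->s_inode_list_lock);
 */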
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations no_open_fops = {.open = no_open};
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
        inode->__i_nlink = 1;
        inode->i_opflags = 0;
        if (sb->s_xattr)
                inode->i_opflags |= IOP_XATTR;
        i_uid_write(inode, 0);
        i_gid_write(inode, 0);
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_link = NULL;
        inode->i_dir_seq = 0;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
        inode->i_wb_frn_winner = 0;
        inode->i_wb_frn_avg_time = 0;
        inode->i_wb_frn_history = 0;
#endif

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        init_rwsem(&inode->i_rwsem);
        lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif
        inode->i_flctx = NULL;
        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (inode->i_sb->s_op->destroy_inode)
                        inode->i_sb->s_op->destroy_inode(inode);
                else
                        kmem_cache_free(inode_cachep, inode);
                return NULL;
        }

        return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        inode_detach_wb(inode);
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
        locks_free_lock_context(inode);
        if (!inode->i_nlink) {
                WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && !is_uncached_acl(inode->i_acl))
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(inode_cachep, inode);
}
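/*
 * Illustrative sketch (the "myfs" names are hypothetical, not part of
 * this file): a filesystem that embeds the VFS inode in its own
 * structure pairs ->alloc_inode and ->destroy_inode roughly like this;
 * alloc_inode() above then calls inode_init_always() on the result:
 *
 *      struct myfs_inode {
 *              // fs-private fields ...
 *              struct inode vfs_inode;
 *      };
 *
 *      static struct inode *myfs_alloc_inode(struct super_block *sb)
 *      {
 *              struct myfs_inode *mi = kmem_cache_alloc(myfs_inode_cachep,
 *                                                       GFP_KERNEL);
 *              return mi ? &mi->vfs_inode : NULL;
 *      }
 *
 *      static void myfs_i_callback(struct rcu_head *head)
 *      {
 *              struct inode *inode = container_of(head, struct inode, i_rcu);
 *
 *              kmem_cache_free(myfs_inode_cachep,
 *                      container_of(inode, struct myfs_inode, vfs_inode));
 *      }
 *
 *      static void myfs_destroy_inode(struct inode *inode)
 *      {
 *              call_rcu(&inode->i_rcu, myfs_i_callback); // RCU-delayed free
 *      }
 */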
static void destroy_inode(struct inode *inode)
{
        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
        WARN_ON(inode->i_nlink == 0);
        inode->__i_nlink--;
        if (!inode->i_nlink)
                atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
        if (inode->i_nlink) {
                inode->__i_nlink = 0;
                atomic_long_inc(&inode->i_sb->s_remove_count);
        }
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
        if (!nlink) {
                clear_nlink(inode);
        } else {
                /* Yes, some filesystems do change nlink from zero to one */
                if (inode->i_nlink == 0)
                        atomic_long_dec(&inode->i_sb->s_remove_count);

                inode->__i_nlink = nlink;
        }
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
        if (unlikely(inode->i_nlink == 0)) {
                WARN_ON(!(inode->i_state & I_LINKABLE));
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

        inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
        spin_lock_init(&mapping->tree_lock);
        init_rwsem(&mapping->i_mmap_rwsem);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);
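/*
 * Illustrative sketch (assumed "myfs" names): a simple unlink operation
 * uses the helpers above instead of poking i_nlink directly, so that
 * s_remove_count stays consistent:
 *
 *      static int myfs_unlink(struct inode *dir, struct dentry *dentry)
 *      {
 *              struct inode *inode = d_inode(dentry);
 *
 *              // ... remove the directory entry on disk first ...
 *              inode->i_ctime = dir->i_ctime = dir->i_mtime =
 *                      current_time(inode);
 *              drop_nlink(inode);              // not inode->i_nlink--
 *              mark_inode_dirty(inode);
 *              return 0;
 *      }
 */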
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_io_list);
        INIT_LIST_HEAD(&inode->i_wb_list);
        INIT_LIST_HEAD(&inode->i_lru);
        address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
        if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_inc(nr_unused);
        else
                inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
            !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
                inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
        if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        if (!list_empty(&inode->i_sb_list)) {
                spin_lock(&inode->i_sb->s_inode_list_lock);
                list_del_init(&inode->i_sb_list);
                spin_unlock(&inode->i_sb->s_inode_list_lock);
        }
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *      inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the inode hash for this superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
        might_sleep();
        /*
         * We have to cycle tree_lock here because reclaim can be still in the
         * process of removing the last page (in __delete_from_page_cache())
         * and we must not free the mapping under it.
         */
        spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
        BUG_ON(inode->i_data.nrexceptional);
        spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        BUG_ON(!list_empty(&inode->i_wb_list));
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to.  We remove any pages still attached to the inode and wait for any IO
 * that is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode).  The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache.  This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        if (!list_empty(&inode->i_io_list))
                inode_io_list_del(inode);

        inode_sb_list_del(inode);

        /*
         * Wait for flusher thread to be done with the inode so that filesystem
         * does not start destroying it while writeback is still running. Since
         * the inode has I_FREEING set, flusher thread won't start new work on
         * the inode.  We just have to wait for running writeback to finish.
         */
        inode_wait_for_writeback(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}
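/*
 * Illustrative sketch (assumed "myfs" names): a filesystem's
 * ->evict_inode ends up doing the same two steps as the default branch
 * in evict() above, plus whatever on-disk cleanup it needs:
 *
 *      static void myfs_evict_inode(struct inode *inode)
 *      {
 *              truncate_inode_pages_final(&inode->i_data);
 *              if (!inode->i_nlink)
 *                      myfs_free_on_disk_inode(inode); // hypothetical helper
 *              clear_inode(inode);
 *      }
 */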
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
                cond_resched();
        }
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);

                /*
                 * We can have a ton of inodes to evict at unmount time given
                 * enough memory; check to see if we need to go to sleep for a
                 * bit so we don't livelock.
                 */
                if (need_resched()) {
                        spin_unlock(&sb->s_inode_list_lock);
                        cond_resched();
                        dispose_list(&dispose);
                        goto again;
                }
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);

        return busy;
}
/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final().  When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed.  This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering.  Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *freeable = arg;
        struct inode *inode = container_of(item, struct inode, i_lru);

        /*
         * we are inverting the lru lock/inode->i_lock here, so use a trylock.
         * If we fail to get the lock, just skip it.
         */
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;

        /*
         * Referenced or dirty inodes are still in use.  Give them another pass
         * through the LRU as we cannot reclaim them now.
         */
        if (atomic_read(&inode->i_count) ||
            (inode->i_state & ~I_REFERENCED)) {
                list_lru_isolate(lru, &inode->i_lru);
                spin_unlock(&inode->i_lock);
                this_cpu_dec(nr_unused);
                return LRU_REMOVED;
        }

        /* recently referenced inodes get one more pass */
        if (inode->i_state & I_REFERENCED) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
        }

        if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(lru_lock);
                if (remove_inode_buffers(inode)) {
                        unsigned long reap;
                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
                        if (current_is_kswapd())
                                __count_vm_events(KSWAPD_INODESTEAL, reap);
                        else
                                __count_vm_events(PGINODESTEAL, reap);
                        if (current->reclaim_state)
                                current->reclaim_state->reclaimed_slab += reap;
                }
                iput(inode);
                spin_lock(lru_lock);
                return LRU_RETRY;
        }

        WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        list_lru_isolate_move(lru, &inode->i_lru, freeable);
        spin_unlock(&inode->i_lock);

        this_cpu_dec(nr_unused);
        return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU.  Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
        LIST_HEAD(freeable);
        long freed;

        freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
                                     inode_lru_isolate, &freeable);
        dispose_list(&freeable);
        return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}
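/*
 * Illustrative sketch: prune_icache_sb() is driven by the per-superblock
 * shrinker in fs/super.c, roughly like the following (simplified from
 * memory, not a verbatim copy of super_cache_scan()):
 *
 *      static unsigned long super_cache_scan(struct shrinker *shrink,
 *                                            struct shrink_control *sc)
 *      {
 *              struct super_block *sb = container_of(shrink,
 *                                      struct super_block, s_shrink);
 *              unsigned long freed = 0;
 *
 *              // ... dentries are pruned first, then inodes:
 *              freed += prune_icache_sb(sb, sc);
 *              return freed;
 *      }
 */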
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
                                struct hlist_head *head, unsigned long ino)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers.  So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage.  At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case.  Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field.  Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        res++;
        /* get_next_ino should not provide a 0 inode number */
        if (unlikely(!res))
                res++;
        *p = res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 * The inode won't be chained into the superblock's s_inodes list.
 * This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
        struct inode *inode = alloc_inode(sb);

        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                INIT_LIST_HEAD(&inode->i_sb_list);
        }
        return inode;
}
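/*
 * Illustrative sketch (assumed "myfs" names): in-memory filesystems that
 * have no stable on-disk inode numbers typically pair new_inode() (just
 * below) with get_next_ino(), e.g.:
 *
 *      struct inode *inode = new_inode(sb);
 *
 *      if (inode) {
 *              inode->i_ino = get_next_ino();
 *              inode_init_owner(inode, dir, mode);
 *              inode->i_atime = inode->i_mtime = inode->i_ctime =
 *                      current_time(inode);
 *      }
 */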
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.  The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&sb->s_inode_list_lock);

        inode = new_inode_pseudo(sb);
        if (inode)
                inode_sb_list_add(inode);
        return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        // mutex_destroy(&inode->i_mutex);
                        init_rwsem(&inode->i_rwsem);
                        lockdep_set_class(&inode->i_rwsem,
                                          &type->i_mutex_dir_key);
                }
        }
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 > inode2)
                swap(inode1, inode2);

        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_lock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_unlock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
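/*
 * Illustrative sketch: callers that operate on two regular files at once
 * (vfs_rename()-style paths, clone/dedupe code) rely on the address
 * ordering inside lock_two_nondirectories() to avoid ABBA deadlocks:
 *
 *      lock_two_nondirectories(src, dst);
 *      // ... exchange or copy state between src and dst ...
 *      unlock_two_nondirectories(src, dst);
 */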
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count.  This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *),
                int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us.  Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;

set_failed:
        spin_unlock(&inode_hash_lock);
        destroy_inode(inode);
        return NULL;
}
EXPORT_SYMBOL(iget5_locked);
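/*
 * Illustrative sketch (assumed "myfs" names): a filesystem whose inodes
 * are not uniquely identified by i_ino alone can key the cache on a
 * fs-private identifier, passed through @data:
 *
 *      static int myfs_test(struct inode *inode, void *data)
 *      {
 *              return MYFS_I(inode)->object_id == *(u64 *)data;
 *      }
 *
 *      static int myfs_set(struct inode *inode, void *data)
 *      {
 *              MYFS_I(inode)->object_id = *(u64 *)data;
 *              return 0;       // nonzero would abort iget5_locked()
 *      }
 *
 *      inode = iget5_locked(sb, (unsigned long)object_id,
 *                           myfs_test, myfs_set, &object_id);
 */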
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count.  This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us.  Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock.  This is used by file systems that have no natural
 * permanent inode numbering system.  An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field.  Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);
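/*
 * Illustrative sketch (assumed "myfs" names): the usual iget_locked()
 * calling convention for an on-disk filesystem.  Only a freshly hashed
 * inode has I_NEW set and needs to be filled from disk:
 *
 *      struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
 *      {
 *              struct inode *inode = iget_locked(sb, ino);
 *
 *              if (!inode)
 *                      return ERR_PTR(-ENOMEM);
 *              if (!(inode->i_state & I_NEW))
 *                      return inode;           // cache hit, already valid
 *
 *              if (myfs_read_inode_from_disk(inode)) { // hypothetical helper
 *                      iget_failed(inode);     // unhashes, unlocks and drops
 *                      return ERR_PTR(-EIO);
 *              }
 *              unlock_new_inode(inode);
 *              return inode;
 *      }
 */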
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return it with an incremented reference
 * count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode;
again:
        inode = ilookup5_nowait(sb, hashval, test, data);
        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup5);
/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;
again:
        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                if (unlikely(inode_unhashed(inode))) {
                        iput(inode);
                        goto again;
                }
        }
        return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped.  The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1.  It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction.  The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
                                unsigned long hashval,
                                int (*match)(struct inode *, unsigned long,
                                             void *),
                                void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode, *ret_inode = NULL;
        int mval;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                mval = match(inode, hashval, data);
                if (mval == 0)
                        continue;
                if (mval == 1)
                        ret_inode = inode;
                goto out;
        }
out:
        spin_unlock(&inode_hash_lock);
        return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);

int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);
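/*
 * Illustrative sketch (assumed "myfs" names): create paths that allocate
 * the inode number themselves use insert_inode_locked() to publish the
 * new inode, bailing out if another instance of that number is live:
 *
 *      inode->i_ino = myfs_new_ino(sb);        // hypothetical allocator
 *      err = insert_inode_locked(inode);
 *      if (err) {
 *              // -EBUSY: someone else owns this ino; give the inode up
 *              make_bad_inode(inode);
 *              iput(inode);
 *              return err;
 *      }
 *      // ... set the inode up, write the directory entry ...
 *      unlock_new_inode(inode);
 */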
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        while (1) {
                struct inode *old = NULL;

                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & MS_ACTIVE)) {
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        if (!list_empty(&inode->i_lru))
                inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count.  If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (!inode)
                return;
        BUG_ON(inode->i_state & I_CLEAR);
retry:
        if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
                if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
                        atomic_inc(&inode->i_count);
                        inode->i_state &= ~I_DIRTY_TIME;
                        spin_unlock(&inode->i_lock);
                        trace_writeback_lazytime_iput(inode);
                        mark_inode_dirty_sync(inode);
                        goto retry;
                }
                iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);
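/*
 * Illustrative sketch: a filesystem that never wants unreferenced inodes
 * cached (network filesystems often don't) can point ->drop_inode at
 * generic_delete_inode so iput_final() always takes the eviction path:
 *
 *      static const struct super_operations myfs_sops = {
 *              // "myfs" names are hypothetical
 *              .drop_inode     = generic_delete_inode,
 *              .evict_inode    = myfs_evict_inode,
 *      };
 */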
/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}
EXPORT_SYMBOL(bmap);

/*
 * Update times in overlayed inode from underlying real inode
 */
static void update_ovl_inode_times(struct dentry *dentry, struct inode *inode,
                                   bool rcu)
{
        if (!rcu) {
                struct inode *realinode = d_real_inode(dentry);

                if (unlikely(inode != realinode) &&
                    (!timespec_equal(&inode->i_mtime, &realinode->i_mtime) ||
                     !timespec_equal(&inode->i_ctime, &realinode->i_ctime))) {
                        inode->i_mtime = realinode->i_mtime;
                        inode->i_ctime = realinode->i_ctime;
                }
        }
}

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(const struct path *path, struct inode *inode,
                                struct timespec now, bool rcu)
{
        if (!(path->mnt->mnt_flags & MNT_RELATIME))
                return 1;

        update_ovl_inode_times(path->dentry, inode, rcu);
        /*
         * Is mtime younger than atime?  If yes, update atime:
         */
        if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
                return 1;
        /*
         * Is ctime younger than atime?  If yes, update atime:
         */
        if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
                return 1;

        /*
         * Is the previous atime value older than a day?  If yes,
         * update atime:
         */
        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
                return 1;
        /*
         * Good, we can skip the atime update:
         */
        return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
        int iflags = I_DIRTY_TIME;

        if (flags & S_ATIME)
                inode->i_atime = *time;
        if (flags & S_VERSION)
                inode_inc_iversion(inode);
        if (flags & S_CTIME)
                inode->i_ctime = *time;
        if (flags & S_MTIME)
                inode->i_mtime = *time;

        if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
        int (*update_time)(struct inode *, struct timespec *, int);

        update_time = inode->i_op->update_time ? inode->i_op->update_time :
                                                 generic_update_time;

        return update_time(inode, time, flags);
}
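/*
 * Illustrative sketch (assumed "myfs" names): a filesystem that needs to
 * journal timestamp updates can supply ->update_time and still reuse the
 * generic bookkeeping:
 *
 *      static int myfs_update_time(struct inode *inode,
 *                                  struct timespec *time, int flags)
 *      {
 *              int err = myfs_begin_transaction(inode); // hypothetical
 *
 *              if (err)
 *                      return err;
 *              err = generic_update_time(inode, time, flags);
 *              myfs_end_transaction(inode);
 *              return err;
 *      }
 */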
/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool __atime_needs_update(const struct path *path, struct inode *inode,
                          bool rcu)
{
        struct vfsmount *mnt = path->mnt;
        struct timespec now;

        if (inode->i_flags & S_NOATIME)
                return false;

        /* Atime updates will likely cause i_uid and i_gid to be written
         * back improperly if their true value is unknown to the vfs.
         */
        if (HAS_UNMAPPED_ID(inode))
                return false;

        if (IS_NOATIME(inode))
                return false;
        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        if (mnt->mnt_flags & MNT_NOATIME)
                return false;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        now = current_time(inode);

        if (!relatime_need_update(path, inode, now, rcu))
                return false;

        if (timespec_equal(&inode->i_atime, &now))
                return false;

        return true;
}

void touch_atime(const struct path *path)
{
        struct vfsmount *mnt = path->mnt;
        struct inode *inode = d_inode(path->dentry);
        struct timespec now;

        if (!__atime_needs_update(path, inode, false))
                return;

        if (!sb_start_write_trylock(inode->i_sb))
                return;

        if (__mnt_want_write(mnt) != 0)
                goto skip_update;
        /*
         * File systems can error out when updating inodes if they need to
         * allocate new space to modify an inode (such is the case for
         * Btrfs), but since we touch atime while walking down the path we
         * really don't care if we failed to update the atime of the file,
         * so just ignore the return value.
         * We may also fail on filesystems that have the ability to make parts
         * of the fs read only, e.g. subvolumes in Btrfs.
         */
        now = current_time(inode);
        update_time(inode, &now, S_ATIME);
        __mnt_drop_write(mnt);
skip_update:
        sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *      if suid or (sgid and xgrp)
 *              remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
        umode_t mode = d_inode(dentry)->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone.  If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;

        return 0;
}
EXPORT_SYMBOL(should_remove_suid);
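/*
 * Worked example for should_remove_suid(): a regular file with mode
 * 06750 (S_ISUID | S_ISGID, group has r-x so S_IXGRP is set) written by
 * an unprivileged task yields kill = ATTR_KILL_SUID | ATTR_KILL_SGID,
 * while mode 02640 (S_ISGID but no group exec bit) is treated as a
 * mandatory-locking marker and yields 0.
 */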
/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate.  Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);
        int mask = 0;
        int ret;

        if (IS_NOSEC(inode))
                return 0;

        mask = should_remove_suid(dentry);
        ret = security_inode_need_killpriv(dentry);
        if (ret < 0)
                return ret;
        if (ret)
                mask |= ATTR_KILL_PRIV;
        return mask;
}

static int __remove_privs(struct dentry *dentry, int kill)
{
        struct iattr newattrs;

        newattrs.ia_valid = ATTR_FORCE | kill;
        /*
         * Note we call this on write, so notify_change will not
         * encounter any conflicting delegations:
         */
        return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when file is written
 * to or truncated.
 */
int file_remove_privs(struct file *file)
{
        struct dentry *dentry = file_dentry(file);
        struct inode *inode = file_inode(file);
        int kill;
        int error = 0;

        /* Fast path for nothing security related */
        if (IS_NOSEC(inode))
                return 0;

        kill = dentry_needs_remove_privs(dentry);
        if (kill < 0)
                return kill;
        if (kill)
                error = __remove_privs(dentry, kill);
        if (!error)
                inode_has_no_xattr(inode);

        return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback.  Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function with the S_NOCMTIME inode
 * flag, e.g. for network filesystems where these timestamps are handled
 * by the server.  This can return an error for file systems that need to
 * allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
        struct inode *inode = file_inode(file);
        struct timespec now;
        int sync_it = 0;
        int ret;

        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
                return 0;

        now = current_time(inode);
        if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = S_MTIME;

        if (!timespec_equal(&inode->i_ctime, &now))
                sync_it |= S_CTIME;

        if (IS_I_VERSION(inode))
                sync_it |= S_VERSION;

        if (!sync_it)
                return 0;

        /* Finally allowed to write?  Takes lock. */
        if (__mnt_want_write_file(file))
                return 0;

        ret = update_time(inode, &now, sync_it);
        __mnt_drop_write_file(file);

        return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
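/*
 * Illustrative sketch: generic write paths call the two helpers above
 * back to back before copying any data, roughly as
 * __generic_file_write_iter() in mm/filemap.c does (simplified from
 * memory):
 *
 *      // with inode_lock(inode) held, before copying user data:
 *      err = file_remove_privs(file);          // kill suid/sgid/caps
 *      if (!err)
 *              err = file_update_time(file);   // bump mtime/ctime/version
 *      if (err)
 *              return err;
 */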
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq, &wait.wait);
        spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
        unsigned int loop;

        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_EARLY,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);

        for (loop = 0; loop < (1U << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
        unsigned int loop;

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache",
                                         sizeof(struct inode),
                                         0,
                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                                         init_once);

        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        0,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);

        for (loop = 0; loop < (1U << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
        inode->i_mode = mode;
        if (S_ISCHR(mode)) {
                inode->i_fop = &def_chr_fops;
                inode->i_rdev = rdev;
        } else if (S_ISBLK(mode)) {
                inode->i_fop = &def_blk_fops;
                inode->i_rdev = rdev;
        } else if (S_ISFIFO(mode))
                inode->i_fop = &pipefifo_fops;
        else if (S_ISSOCK(mode))
                ;       /* leave it no_open_fops */
        else
                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
                                  " inode %s:%lu\n", mode, inode->i_sb->s_id,
                                  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
                        umode_t mode)
{
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
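/*
 * Illustrative sketch (assumed "myfs" names): a typical ->mknod in a
 * simple in-memory filesystem brings the two helpers above together:
 *
 *      static int myfs_mknod(struct inode *dir, struct dentry *dentry,
 *                            umode_t mode, dev_t rdev)
 *      {
 *              struct inode *inode = new_inode(dir->i_sb);
 *
 *              if (!inode)
 *                      return -ENOMEM;
 *              inode->i_ino = get_next_ino();
 *              inode_init_owner(inode, dir, mode);
 *              init_special_inode(inode, inode->i_mode, rdev);
 *              d_instantiate(dentry, inode);
 *              dget(dentry);           // extra pin, as ramfs does
 *              return 0;
 *      }
 */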
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
        struct user_namespace *ns;

        if (uid_eq(current_fsuid(), inode->i_uid))
                return true;

        ns = current_user_ns();
        if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
                return true;
        return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
        wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
        DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

        do {
                prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&inode->i_dio_count))
                        schedule();
        } while (atomic_read(&inode->i_dio_count));
        finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
        if (atomic_read(&inode->i_dio_count))
                __inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated).  The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule --- is that there is at least one
 * code path which doesn't today, so we use cmpxchg() out of an abundance
 * of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
                     unsigned int mask)
{
        unsigned int old_flags, new_flags;

        WARN_ON_ONCE(flags & ~mask);
        do {
                old_flags = ACCESS_ONCE(inode->i_flags);
                new_flags = (old_flags & ~mask) | flags;
        } while (unlikely(cmpxchg(&inode->i_flags, old_flags,
                                  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);

void inode_nohighmem(struct inode *inode)
{
        mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);

/**
 * current_time - Return FS time
 * @inode: inode.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 *
 * Note that inode and inode->sb cannot be NULL.
 * Otherwise, the function warns and returns time without truncation.
 */
struct timespec current_time(struct inode *inode)
{
        struct timespec now = current_kernel_time();

        if (unlikely(!inode->i_sb)) {
                WARN(1, "current_time() called with uninitialized super_block in the inode");
                return now;
        }

        return timespec_trunc(now, inode->i_sb->s_time_gran);
}
EXPORT_SYMBOL(current_time);
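/*
 * Illustrative sketch (assumed "myfs" names): a filesystem propagating
 * its on-disk attribute bits into i_flags would use inode_set_flags()
 * to update only the bits it owns:
 *
 *      unsigned int new_fl = 0;
 *
 *      if (myfs_flags & MYFS_IMMUTABLE_FL)     // hypothetical on-disk bit
 *              new_fl |= S_IMMUTABLE;
 *      if (myfs_flags & MYFS_NOATIME_FL)
 *              new_fl |= S_NOATIME;
 *      inode_set_flags(inode, new_fl, S_IMMUTABLE | S_NOATIME);
 */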