/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include "internal.h"


int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static void __d_free(struct dentry *dentry)
{
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void d_callback(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (dentry->d_hash.pprev == NULL)
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock and per dentry lock held, drops both.
 */
static void dentry_iput(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * Called with dcache_lock and d_lock, releases both. The dentry must
 * already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 */
static struct dentry *d_kill(struct dentry *dentry)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	return dentry == parent ? NULL : parent;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* If dentry was on the d_lru list
	 * delete it from there
	 */
	if (!list_empty(&dentry->d_lru)) {
		list_del(&dentry->d_lru);
		dentry_stat.nr_unused--;
	}
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}

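/*
 * Illustrative sketch, not part of this file: every reference taken on
 * a dentry must eventually be balanced by one dput(). The helper name
 * peek_child() is hypothetical; d_lookup() itself takes the reference
 * that the final dput() releases, and that dput() is what may enter
 * the d_kill() path above.
 *
 *	static void peek_child(struct dentry *parent, struct qstr *name)
 *	{
 *		struct dentry *child = d_lookup(parent, name);
 *
 *		if (child) {
 *			// ... inspect child while the reference pins it ...
 *			dput(child);	// may free it or park it on the LRU
 *		}
 *	}
 */
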
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}

/* This should be called _only_ with dcache_lock held */

static inline struct dentry *__dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	return dentry;
}

struct dentry *dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */

static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias = NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_lock);
	}
	return de;
}

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}

/*
 * Throw away a dentry - free the inode, dput the parent. This requires
 * that the dentry has already been removed from the LRU list.
 *
 * Try to prune ancestors as well. This is necessary to prevent
 * quadratic behavior of shrink_dcache_parent(), but is also expected
 * to be beneficial in reducing dentry cache fragmentation.
 *
 * Called with dcache_lock, drops it and then regains.
 * Called with dentry->d_lock held, drops it.
 */
static void prune_one_dentry(struct dentry *dentry)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);

	/*
	 * Prune ancestors. Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		if (!list_empty(&dentry->d_lru)) {
			list_del(&dentry->d_lru);
			dentry_stat.nr_unused--;
		}
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 * @sb: if given, ignore dentries for other superblocks
 *      which are being unmounted.
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */

static void prune_dcache(int count, struct super_block *sb)
{
	spin_lock(&dcache_lock);
	for (; count ; count--) {
		struct dentry *dentry;
		struct list_head *tmp;
		struct rw_semaphore *s_umount;

		cond_resched_lock(&dcache_lock);

		tmp = dentry_unused.prev;
		if (sb) {
			/* Try to find a dentry for this sb, but don't try
			 * too hard, if they aren't near the tail they will
			 * be moved down again soon
			 */
			int skip = count;
			while (skip && tmp != &dentry_unused &&
			    list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
				skip--;
				tmp = tmp->prev;
			}
		}
		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		prefetch(dentry_unused.prev);
		dentry_stat.nr_unused--;
		dentry = list_entry(tmp, struct dentry, d_lru);

		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * dentry_unused because of laziness during lookup. Do not free
		 * it - just keep it off the dentry_unused list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/*
		 * If the dentry is not DCACHE_REFERENCED, it is time
		 * to remove it from the dcache, provided the super block is
		 * NULL (which means we are trying to reclaim memory)
		 * or this dentry belongs to the same super block that
		 * we want to shrink.
		 */
		/*
		 * If this dentry is for "my" filesystem, then I can prune it
		 * without taking the s_umount lock (I already hold it).
		 */
		if (sb && dentry->d_sb == sb) {
			prune_one_dentry(dentry);
			continue;
		}
		/*
		 * ...otherwise we need to be sure this filesystem isn't being
		 * unmounted, otherwise we could race with
		 * generic_shutdown_super(), and end up holding a reference to
		 * an inode while the filesystem is unmounted.
		 * So we try to get s_umount, and make sure s_root isn't NULL.
		 * (Take a local copy of s_umount to avoid a use-after-free of
		 * `dentry').
		 */
		s_umount = &dentry->d_sb->s_umount;
		if (down_read_trylock(s_umount)) {
			if (dentry->d_sb->s_root != NULL) {
				prune_one_dentry(dentry);
				up_read(s_umount);
				continue;
			}
			up_read(s_umount);
		}
		spin_unlock(&dentry->d_lock);
		/*
		 * Insert dentry at the head of the list as inserting at the
		 * tail leads to a cycle.
		 */
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dcache_lock);
}

/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list. On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them. The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system.
 */

void shrink_dcache_sb(struct super_block *sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	list_for_each_prev_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_move_tail(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	list_for_each_prev_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		spin_lock(&dentry->d_lock);
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		cond_resched_lock(&dcache_lock);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	if (!list_empty(&dentry->d_lru)) {
		dentry_stat.nr_unused--;
		list_del_init(&dentry->d_lru);
	}
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				if (!list_empty(&loop->d_lru)) {
					dentry_stat.nr_unused--;
					list_del_init(&loop->d_lru);
				}

				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			parent = dentry->d_parent;
			if (parent == dentry)
				parent = NULL;
			else
				atomic_dec(&parent->d_count);

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point.
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}

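/*
 * Illustrative sketch, not part of this file: a remote filesystem's
 * ->d_revalidate() can combine have_submounts() and d_invalidate() to
 * drop a subtree the server no longer knows about, while refusing to
 * invalidate anything that is in use as a mount point. The names
 * example_d_revalidate() and entry_is_stale_on_server() are
 * hypothetical.
 *
 *	static int example_d_revalidate(struct dentry *dentry,
 *					struct nameidata *nd)
 *	{
 *		if (entry_is_stale_on_server(dentry)) {
 *			if (have_submounts(dentry))
 *				return 0;	// stale, but can't drop it here
 *			if (!d_invalidate(dentry))
 *				return 0;	// unhashed; forces a re-lookup
 *		}
 *		return 1;			// still valid
 *	}
 */
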
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		if (!list_empty(&dentry->d_lru)) {
			dentry_stat.nr_unused--;
			list_del_init(&dentry->d_lru);
		}
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			list_add_tail(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry *parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
		prune_dcache(found, parent->d_sb);
}

/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt. One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr, NULL);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

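/*
 * A worked example of the return value above (the numbers are chosen
 * purely for illustration). With 20000 unused dentries and the default
 * vfs_cache_pressure of 100, shrink_dcache_memory() reports
 * (20000 / 100) * 100 = 20000 reclaimable entries to the VM. Setting
 * vfs_cache_pressure to 200 doubles the reported figure, so the VM
 * reclaims dentries more aggressively; 50 halves it, favouring keeping
 * the cache populated.
 */
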
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied, so the caller's copy may be reused after this call.
 */

struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}

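/*
 * Illustrative sketch, not part of this file: how a simple filesystem
 * might combine d_alloc_name() and d_instantiate() to create a named,
 * positive dentry. The names example_make_entry() and
 * example_new_inode() are hypothetical; the inode is assumed to come
 * back with the reference that d_instantiate() documents.
 *
 *	static struct dentry *example_make_entry(struct dentry *dir,
 *						 const char *name)
 *	{
 *		struct dentry *dentry = d_alloc_name(dir, name);
 *		struct inode *inode;
 *
 *		if (!dentry)
 *			return NULL;
 *		inode = example_new_inode(dir->d_sb);
 *		if (!inode) {
 *			dput(dentry);		// discards the negative dentry
 *			return NULL;
 *		}
 *		d_instantiate(dentry, inode);	// dentry now owns the inode ref
 *		return dentry;
 *	}
 */
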
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		entry->d_inode = NULL;
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry *d_alloc_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}

static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

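/*
 * A note on the bucket computation above. The parent pointer is mixed
 * in (XORed with GOLDEN_RATIO_PRIME, then divided by L1_CACHE_BYTES to
 * discard the low alignment bits, which are identical for all
 * slab-allocated dentries) so that equal name hashes under different
 * parents land in different buckets. The second line folds the bits
 * above D_HASHBITS back into the low bits before masking.
 * Illustration (assuming d_hash_shift == 13): the table has
 * 1 << 13 == 8192 buckets and d_hash_mask == 8191, so the final
 * "hash & D_HASHMASK" keeps exactly the 13 folded low bits as the
 * bucket index.
 */
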
/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root. It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry. The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache). The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry. If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */

struct dentry *d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (!res) {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		spin_lock(&res->d_lock);
		res->d_sb = inode->i_sb;
		res->d_parent = res;
		res->d_inode = inode;
		res->d_flags |= DCACHE_DISCONNECTED;
		res->d_flags &= ~DCACHE_UNHASHED;
		list_add(&res->d_alias, &inode->i_dentry);
		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
		spin_unlock(&res->d_lock);

		inode = NULL; /* don't drop reference */
	}
	spin_unlock(&dcache_lock);

	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}


/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			fsnotify_d_instantiate(new, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* d_instantiate takes dcache_lock, so we do it by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			fsnotify_d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}

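/*
 * Illustrative sketch, not part of this file: the usual shape of an
 * exportable filesystem's ->lookup() built on d_splice_alias(). The
 * names example_lookup(), example_find_ino() and example_iget() are
 * hypothetical.
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *					     struct dentry *dentry,
 *					     struct nameidata *nd)
 *	{
 *		struct inode *inode = NULL;
 *		ino_t ino = example_find_ino(dir, &dentry->d_name);
 *
 *		if (ino) {
 *			inode = example_iget(dir->i_sb, ino);
 *			if (IS_ERR(inode))
 *				return ERR_PTR(PTR_ERR(inode));
 *		}
 *		// Reuses a DCACHE_DISCONNECTED directory alias if one
 *		// exists, otherwise behaves like d_add(dentry, inode).
 *		return d_splice_alias(inode, dentry);
 *	}
 */
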
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_lock is used.
 *
 * Overflows in memcmp(), while d_move() is in progress, are avoided by
 * keeping the length and name pointer in one structure pointed to by
 * d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * The dentry_unused list is not updated even if lookup finds the required
 * dentry in there. It is updated in places such as prune_dcache,
 * shrink_dcache_sb, select_parent and __dget_locked. This laziness saves
 * lookup from dcache_lock acquisition.
 *
 * d_lookup() is protected against concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */

struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}

struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent, hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things. Don't bother checking the hash because we're
		 * about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		if (!d_unhashed(dentry)) {
			atomic_inc(&dentry->d_count);
			found = dentry;
		}
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp, base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry *dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);

		/* remove this and other inotify debug checks after 2.6.18 */
		dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}

static void __d_rehash(struct dentry *entry, struct hlist_head *list)
{

	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

static void _d_rehash(struct dentry *entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry *entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}

#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal. Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
		}
	}
}

/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it had before it was deleted rather than
 * under the original name of the file that was moved on top of it.
 */

/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
static void d_move_locked(struct dentry *dentry, struct dentry *target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */

void d_move(struct dentry *dentry, struct dentry *target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}

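/*
 * A note on the locking order in d_move_locked() above: the two d_locks
 * are always taken in ascending address order, which makes the order
 * globally consistent. If two d_move() calls race over the same pair of
 * dentries, both take the lower-addressed lock first, so the classic
 * ABBA deadlock cannot occur. A generic sketch of the same idiom
 * (lock_pair() is hypothetical and assumes a != b):
 *
 *	static void lock_pair(spinlock_t *a, spinlock_t *b)
 *	{
 *		if (a < b) {
 *			spin_lock(a);
 *			spin_lock_nested(b, SINGLE_DEPTH_NESTING);
 *		} else {
 *			spin_lock(b);
 *			spin_lock_nested(a, SINGLE_DEPTH_NESTING);
 *		}
 *	}
 */
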
/*
 * Helper that returns 1 if p1 is a parent of p2, else 0
 */
static int d_isparent(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; p->d_parent != p; p = p->d_parent) {
		if (p->d_parent == p1)
			return 1;
	}
	return 0;
}

/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 *
 * On return, dcache_lock will have been unlocked.
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_isparent(alias, dentry))
		goto out_err;

	/* See lock_rename() */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	do_switch(dentry->d_name.len, anon->d_name.len);
	do_switch(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}

/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	if (!inode) {
		actual = dentry;
		dentry->d_inode = NULL;
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this an anonymous mountpoint that we could splice
			 * into our tree? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
	goto shouldnt_be_hashed;
}

/**
 * d_path - return the path of a dentry
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @root: root dentry
 * @rootmnt: vfsmnt to which the root dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 */
static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
			struct dentry *root, struct vfsmount *rootmnt,
			char *buffer, int buflen)
{
	char *end = buffer + buflen;
	char *retval;
	int namelen;

	*--end = '\0';
	buflen--;
	if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
		buflen -= 10;
		end -= 10;
		if (buflen < 0)
			goto Elong;
		memcpy(end, " (deleted)", 10);
	}

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry *parent;

		if (dentry == root && vfsmnt == rootmnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			spin_lock(&vfsmount_lock);
			if (vfsmnt->mnt_parent == vfsmnt) {
				spin_unlock(&vfsmount_lock);
				goto global_root;
			}
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			spin_unlock(&vfsmount_lock);
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		namelen = dentry->d_name.len;
		buflen -= namelen + 1;
		if (buflen < 0)
			goto Elong;
		end -= namelen;
		memcpy(end, dentry->d_name.name, namelen);
		*--end = '/';
		retval = end;
		dentry = parent;
	}

	return retval;

global_root:
	namelen = dentry->d_name.len;
	buflen -= namelen;
	if (buflen < 0)
		goto Elong;
	retval -= namelen-1;	/* hit the slash */
	memcpy(retval, dentry->d_name.name, namelen);
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}

/* write full pathname into buffer and return start of pathname */
char *d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
	      char *buf, int buflen)
{
	char *res;
	struct vfsmount *rootmnt;
	struct dentry *root;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (dentry->d_op && dentry->d_op->d_dname)
		return dentry->d_op->d_dname(dentry, buf, buflen);

	read_lock(&current->fs->lock);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);
	return res;
}

/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}

/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char *buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
	int error;
	struct vfsmount *pwdmnt, *rootmnt;
	struct dentry *pwd, *root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwdmnt = mntget(current->fs->pwdmnt);
	pwd = dget(current->fs->pwd);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
		unsigned long len;
		char *cwd;

		cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	dput(pwd);
	mntput(pwdmnt);
	dput(root);
	mntput(rootmnt);
	free_page((unsigned long) page);
	return error;
}

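/*
 * Illustrative sketch, not part of this file: a ->d_dname() built on
 * dynamic_dname(), in the style used by synthetic filesystems such as
 * pipefs for "pipe:[<ino>]" names. The name example_dname() is
 * hypothetical.
 *
 *	static char *example_dname(struct dentry *dentry, char *buffer,
 *				   int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen,
 *				     "example:[%lu]",
 *				     dentry->d_inode->i_ino);
 *	}
 *
 * d_path() then produces this string instead of walking d_parent, so
 * the object gets a readable name in /proc/<pid>/fd without ever being
 * hashed.
 */
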
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	struct dentry *saved = new_dentry;
	unsigned long seq;

	/* need rcu_read_lock to protect against the d_parent trashing
	 * due to d_move
	 */
	rcu_read_lock();
	do {
		/* for restarting inner loop in case of seq retry */
		new_dentry = saved;
		result = 0;
		seq = read_seqbegin(&rename_lock);
		for (;;) {
			if (new_dentry != old_dentry) {
				struct dentry *parent = new_dentry->d_parent;
				if (parent == new_dentry)
					break;
				new_dentry = parent;
				continue;
			}
			result = 1;
			break;
		}
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}

void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		if (d_unhashed(dentry) || !dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}

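/*
 * Sizing illustration (numbers are examples, not recommendations):
 * booting with "dhash_entries=65536" requests a 65536-bucket table, so
 * d_hash_shift becomes 16 and d_hash_mask 65535. When dhash_entries is
 * left at zero, alloc_large_system_hash() sizes the table from the
 * amount of memory present; with the scale factor of 13 passed here
 * and 4KB pages, that works out to roughly one hash bucket per 8KB of
 * memory.
 */
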
static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	register_shrinker(&dcache_shrinker);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;

/* SLAB cache for file structures */
struct kmem_cache *filp_cachep __read_mostly;

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}

EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL_GPL(d_materialise_unique);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);