/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include "internal.h"

int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static void __d_free(struct dentry *dentry)
{
	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void d_callback(struct rcu_head *head)
{
	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}
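/*
 * The call_rcu() deferral above exists because __d_lookup() (later in this
 * file) walks the hash chains under rcu_read_lock() without taking
 * dcache_lock.  A minimal sketch of the reader side being protected:
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
 *		...	// may run concurrently with a dentry being torn down
 *	}
 *	rcu_read_unlock();
 *
 * Deferring the kmem_cache_free() through RCU guarantees the memory is not
 * recycled until all such read-side sections in flight have completed.
 */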
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}

/*
 * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}

static void dentry_lru_add_tail(struct dentry *dentry)
{
	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	dentry->d_sb->s_nr_dentry_unused++;
	dentry_stat.nr_unused++;
}

static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		list_del(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}

static void dentry_lru_del_init(struct dentry *dentry)
{
	if (likely(!list_empty(&dentry->d_lru))) {
		list_del_init(&dentry->d_lru);
		dentry->d_sb->s_nr_dentry_unused--;
		dentry_stat.nr_unused--;
	}
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 */
static struct dentry *d_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	d_free(dentry);
	return parent;
}
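/*
 * A minimal sketch (hypothetical caller, not part of this file) of the
 * reference-counting contract that ends in the d_kill() path above: every
 * dget() must be balanced by exactly one dput(), and it is the final dput()
 * that may unhash and free the dentry.
 *
 *	struct dentry *d = dget(some_dentry);	// take a reference
 *	...					// safely use d->d_inode etc.
 *	dput(d);				// last ref may trigger d_kill()
 */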
/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		dentry_lru_add(dentry);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	dentry_lru_del_init(dentry);
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
EXPORT_SYMBOL(dget_locked);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */

static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias=NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}

struct dentry * d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
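/*
 * A minimal sketch of the d_find_alias() calling convention (the caller is
 * hypothetical, modelled on export-type users): the returned alias carries
 * a reference that the caller must drop with dput().
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		...		// reuse the existing dentry for this inode
 *		dput(alias);	// balance the reference d_find_alias() took
 *	}
 */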
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Throw away a dentry - free the inode, dput the parent. This requires that
 * the LRU list has already been removed.
 *
 * Try to prune ancestors as well. This is necessary to prevent
 * quadratic behavior of shrink_dcache_parent(), but is also expected
 * to be beneficial in reducing dentry cache fragmentation.
 */
static void prune_one_dentry(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
	__acquires(dcache_lock)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);

	/*
	 * Prune ancestors. Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		dentry_lru_del_init(dentry);
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}
/*
 * Shrink the dentry LRU on a given superblock.
 * @sb   : superblock to shrink dentry LRU.
 * @count: If count is NULL, we prune all dentries on superblock.
 * @flags: If flags is non-zero, we need to do special processing based on
 * which flags are set. This means we don't need to maintain multiple
 * similar copies of this loop.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	struct dentry *dentry;
	int cnt = 0;

	BUG_ON(!sb);
	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
	spin_lock(&dcache_lock);
	if (count != NULL)
		/* called from prune_dcache() and shrink_dcache_parent() */
		cnt = *count;
restart:
	if (count == NULL)
		list_splice_init(&sb->s_dentry_lru, &tmp);
	else {
		while (!list_empty(&sb->s_dentry_lru)) {
			dentry = list_entry(sb->s_dentry_lru.prev,
					struct dentry, d_lru);
			BUG_ON(dentry->d_sb != sb);

			spin_lock(&dentry->d_lock);
			/*
			 * If we are honouring the DCACHE_REFERENCED flag and
			 * the dentry has this flag set, don't free it. Clear
			 * the flag and put it back on the LRU.
			 */
			if ((flags & DCACHE_REFERENCED)
				&& (dentry->d_flags & DCACHE_REFERENCED)) {
				dentry->d_flags &= ~DCACHE_REFERENCED;
				list_move(&dentry->d_lru, &referenced);
				spin_unlock(&dentry->d_lock);
			} else {
				list_move_tail(&dentry->d_lru, &tmp);
				spin_unlock(&dentry->d_lock);
				cnt--;
				if (!cnt)
					break;
			}
			cond_resched_lock(&dcache_lock);
		}
	}
	while (!list_empty(&tmp)) {
		dentry = list_entry(tmp.prev, struct dentry, d_lru);
		dentry_lru_del_init(dentry);
		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		/* dentry->d_lock was dropped in prune_one_dentry() */
		cond_resched_lock(&dcache_lock);
	}
	if (count == NULL && !list_empty(&sb->s_dentry_lru))
		goto restart;
	if (count != NULL)
		*count = cnt;
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lock);
}
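/*
 * A worked example (hypothetical numbers) of the proportional scan done by
 * prune_dcache() below: each superblock gives up roughly the same fraction
 * of its unused dentries rather than a fixed amount.  With count = 100 and
 * dentry_stat.nr_unused = 1000, prune_ratio = 1000 / 100 = 10, so a
 * superblock with 250 unused dentries is scanned with
 * w_count = 250 / 10 + 1 = 26, while one with 30 unused dentries gets
 * w_count = 30 / 10 + 1 = 4 - about a tenth of each, summing to roughly
 * the requested total.
 */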
/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
 *
 * Shrink the dcache. This is done when we need more memory, or simply when we
 * need to unmount something (at which point we need to unuse all dentries).
 *
 * This function may fail to free any resources if all the dentries are in use.
 */
static void prune_dcache(int count)
{
	struct super_block *sb, *p = NULL;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	spin_lock(&dcache_lock);
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;
		/* Now, we reclaim unused dentries with fairness.
		 * We reclaim the same percentage from each superblock.
		 * We calculate the number of dentries to scan on this sb
		 * as follows, but the implementation is arranged to avoid
		 * overflows:
		 * number of dentries to scan on this sb =
		 * count * (number of dentries on this sb /
		 * number of dentries in the machine)
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super(), and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted. So we try to get s_umount, and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				spin_unlock(&dcache_lock);
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
				spin_lock(&dcache_lock);
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		count -= pruned;
		p = sb;
		/* more work left to do? */
		if (count <= 0)
			break;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	spin_unlock(&dcache_lock);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system
 */
void shrink_dcache_sb(struct super_block * sb)
{
	__shrink_dcache_sb(sb, NULL, 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	dentry_lru_del_init(dentry);
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				dentry_lru_del_init(loop);
				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}
		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry))
				parent = NULL;
			else {
				parent = dentry->d_parent;
				atomic_dec(&parent->d_count);
			}

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
EXPORT_SYMBOL(have_submounts);
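/*
 * A minimal sketch of a typical have_submounts() caller (hypothetical, but
 * in the style of automount expiry and revalidation code): a subtree with a
 * mount anywhere below it must not be torn down.
 *
 *	if (have_submounts(dentry))
 *		return -EBUSY;	// something is mounted below this dentry
 */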
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		dentry_lru_del_init(dentry);
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			dentry_lru_add_tail(dentry);
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, &found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);
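/*
 * shrink_dcache_parent() is the building block d_invalidate() above relies
 * on: unused descendants are flushed first, so only genuinely busy children
 * can keep a directory dentry pinned.  The calling pattern, as used there:
 *
 *	if (!list_empty(&dentry->d_subdirs))
 *		shrink_dcache_parent(dentry);	// drop unused children first
 */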
/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt. One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
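/*
 * A minimal sketch of allocating and instantiating a named child the way
 * simple in-kernel filesystems do ("parent" and "inode" stand for whatever
 * the caller already holds; error handling is elided):
 *
 *	struct dentry *child = d_alloc_name(parent, "example");
 *	if (!child)
 *		return -ENOMEM;
 *	d_add(child, inode);	// instantiate and hash in one step
 */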
/* the caller must hold dcache_lock */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	if (inode)
		list_add(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	__d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);
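/*
 * A minimal sketch of the d_instantiate_unique() calling convention (the
 * caller shown is hypothetical): the inode reference passed in is consumed
 * either way - attached to the dentry on a NULL return, or dropped via
 * iput() when an existing alias is returned with a fresh reference.
 *
 *	struct dentry *alias = d_instantiate_unique(dentry, inode);
 *	if (alias)
 *		dentry = alias;	// reuse the pre-existing alias instead
 */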
/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}
	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (res) {
		spin_unlock(&dcache_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_sb = inode->i_sb;
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	tmp->d_flags &= ~DCACHE_UNHASHED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
	spin_unlock(&tmp->d_lock);

	spin_unlock(&dcache_lock);
	return tmp;

out_iput:
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
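/*
 * The NULL/IS_ERR handling above lets export operations feed an inode
 * lookup straight into d_obtain_alias().  A minimal sketch of a
 * ->fh_to_dentry() written in that style (example_iget() is a hypothetical
 * filesystem helper):
 *
 *	static struct dentry *example_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = example_iget(sb, fid->i32.ino);
 *		return d_obtain_alias(inode);	// errors propagate for us
 *	}
 */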
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking dcache_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
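/*
 * Since the return value matches what ->lookup must produce, an exportable
 * filesystem's lookup method can end with d_splice_alias().  A minimal
 * sketch (example_lookup_inode() is a hypothetical filesystem helper):
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *				struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = example_lookup_inode(dir, &dentry->d_name);
 *		if (IS_ERR(inode))
 *			return ERR_CAST(inode);
 *		return d_splice_alias(inode, dentry);	// NULL or the alias
 *	}
 */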
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match, if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	spin_lock(&dcache_lock);
	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
		__d_instantiate(found, inode);
		spin_unlock(&dcache_lock);
		security_d_instantiate(found, inode);
		return found;
	}

	/*
	 * In case a directory already has a (disconnected) entry grab a
	 * reference to it, move it in place and use it.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
	dget_locked(new);
	spin_unlock(&dcache_lock);
	security_d_instantiate(found, inode);
	d_move(new, found);
	iput(inode);
	dput(found);
	return new;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);
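/*
 * A minimal sketch of the intended d_add_ci() caller (hypothetical, in the
 * style of case-insensitive filesystems such as fat): the directory scan in
 * ->lookup() found the inode and the name's on-disk case, and d_add_ci()
 * ensures only that case-exact spelling enters the dcache.
 *
 *	// "inode" and "exact_name" come from the on-disk directory scan:
 *	return d_add_ci(dentry, inode, &exact_name);
 */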
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	struct dentry * dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);

/*
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/vfs/dcache-locking.txt for more details.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things. Don't bother checking the hash because
		 * we're about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/* non-existing due to RCU? */
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		atomic_inc(&dentry->d_count);
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}
EXPORT_SYMBOL(d_validate);
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);

static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{

	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_rehash);

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal. Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}
/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it had before it was deleted rather than
 * under the original name of the file that was moved on top of it.
 */

/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */

void d_move(struct dentry * dentry, struct dentry * target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_move);

/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
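/*
 * d_ancestor() is how loop-avoidance is expressed before moving directory
 * dentries around: a dentry must never be spliced beneath its own
 * descendant.  __d_unalias() below uses exactly this check:
 *
 *	if (d_ancestor(alias, dentry))
 *		return ERR_PTR(-ELOOP);	// the move would create a cycle
 */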
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
	__releases(dcache_lock)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_ancestor(alias, dentry))
		goto out_err;

	/* See lock_rename() */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this an anonymous mountpoint that we could splice
			 * into our tree? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
}
EXPORT_SYMBOL_GPL(d_materialise_unique);

static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}
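/*
 * prepend() fills its buffer right to left, which is why the path builders
 * below start at buf + buflen and walk from leaf to root.  A worked example
 * (hypothetical 64-byte buffer) producing "/usr/bin":
 *
 *	char buf[64], *buffer = buf + 64;
 *	int buflen = 64;
 *
 *	prepend(&buffer, &buflen, "\0", 1);	// terminator
 *	prepend(&buffer, &buflen, "bin", 3);	// "bin"
 *	prepend(&buffer, &buflen, "/", 1);	// "/bin"
 *	prepend(&buffer, &buflen, "usr", 3);	// "usr/bin"
 *	prepend(&buffer, &buflen, "/", 1);	// "/usr/bin"
 *
 * The string now starts at buffer, not at buf - callers must use the
 * returned/updated pointer.
 */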
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.  Caller holds the dcache_lock.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, root, &res, &buflen);
	if (error)
		return ERR_PTR(error);

	return res;
}

/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path, struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}

/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	struct path tmp;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted.  On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed.  They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/.  The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	spin_lock(&dcache_lock);
	tmp = root;
	error = path_with_deleted(path, &tmp, &res, &buflen);
	if (error)
		res = ERR_PTR(error);
	spin_unlock(&dcache_lock);
	path_put(&root);
	return res;
}
EXPORT_SYMBOL(d_path);
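
/*
 * Typical d_path() caller pattern (a sketch; the surrounding driver code
 * is hypothetical, but this is the intended calling convention):
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	if (page) {
 *		char *name = d_path(&file->f_path, page, PAGE_SIZE);
 *		if (!IS_ERR(name))
 *			printk(KERN_DEBUG "file: %s\n", name);
 *		free_page((unsigned long)page);
 *	}
 *
 * Note that "name" points somewhere inside the page, not at its start,
 * which is why the returned pointer rather than the buffer must be used.
 */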
/**
 * d_path_with_unreachable - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * The difference from d_path() is that this prepends "(unreachable)"
 * to paths which are unreachable from the current process' root.
 */
char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	struct path tmp;
	int error;

	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	spin_lock(&dcache_lock);
	tmp = root;
	error = path_with_deleted(path, &tmp, &res, &buflen);
	if (!error && !path_equal(&tmp, &root))
		error = prepend_unreachable(&res, &buflen);
	spin_unlock(&dcache_lock);
	path_put(&root);
	if (error)
		res = ERR_PTR(error);

	return res;
}

/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
		    const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}

/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	prepend(&end, &buflen, "\0", 1);
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end - 1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;

		retval = end;
		dentry = parent;
	}
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
EXPORT_SYMBOL(__dentry_path);

char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	spin_lock(&dcache_lock);
	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	spin_unlock(&dcache_lock);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	spin_unlock(&dcache_lock);
	return ERR_PTR(-ENAMETOOLONG);
}
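
/*
 * Example ->d_dname() implementation built on dynamic_dname() above, in
 * the style used by synthetic filesystems such as pipefs (a sketch):
 *
 *	static char *pipefs_dname(struct dentry *dentry, char *buffer,
 *				  int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 *				     dentry->d_inode->i_ino);
 *	}
 *
 * With such an operation installed, d_path() above short-circuits and
 * reports "pipe:[<ino>]" instead of walking the (unhashed) dentry tree.
 */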
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char *buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	get_fs_root_and_pwd(current->fs, &root, &pwd);

	error = -ENOENT;
	spin_lock(&dcache_lock);
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		struct path tmp = root;
		char *cwd = page + PAGE_SIZE;
		int buflen = PAGE_SIZE;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &tmp, &cwd, &buflen);
		spin_unlock(&dcache_lock);

		if (error)
			goto out;

		/* Unreachable from current root */
		if (!path_equal(&tmp, &root)) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		spin_unlock(&dcache_lock);
	}

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}
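
/*
 * is_subdir() below uses the rename_lock seqlock read side to sample the
 * d_parent chain consistently against concurrent renames.  The generic
 * read-side retry pattern looks like this (a sketch):
 *
 *	unsigned long seq;
 *
 *	rcu_read_lock();
 *	do {
 *		seq = read_seqbegin(&rename_lock);
 *		// ... walk d_parent pointers ...
 *	} while (read_seqretry(&rename_lock, seq));
 *	rcu_read_unlock();
 *
 * RCU keeps the dentries themselves from being freed during the walk;
 * the seqlock retry discards any result computed across a rename.
 */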
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
 */
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned long seq;

	if (new_dentry == old_dentry)
		return 1;

	/*
	 * Need rcu_read_lock() to protect against d_parent being
	 * changed under us by d_move().
	 */
	rcu_read_lock();
	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}

int path_is_under(struct path *path1, struct path *path2)
{
	struct vfsmount *mnt = path1->mnt;
	struct dentry *dentry = path1->dentry;
	int res;

	br_read_lock(vfsmount_lock);
	if (mnt != path2->mnt) {
		for (;;) {
			if (mnt->mnt_parent == mnt) {
				br_read_unlock(vfsmount_lock);
				return 0;
			}
			if (mnt->mnt_parent == path2->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		dentry = mnt->mnt_mountpoint;
	}
	res = is_subdir(dentry, path2->dentry);
	br_read_unlock(vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		if (d_unhashed(dentry) || !dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */
ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}
EXPORT_SYMBOL(find_inode_number);

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
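
/*
 * Via the __setup() hook above, the dentry hash table size can be pinned
 * from the kernel command line instead of being derived from available
 * memory, e.g. (illustrative value):
 *
 *	dhash_entries=1048576
 *
 * alloc_large_system_hash() below still rounds the request to a power of
 * two and reports the resulting d_hash_shift/d_hash_mask.
 */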
2400 */ 2401 if (hashdist) 2402 return; 2403 2404 dentry_hashtable = 2405 alloc_large_system_hash("Dentry cache", 2406 sizeof(struct hlist_head), 2407 dhash_entries, 2408 13, 2409 HASH_EARLY, 2410 &d_hash_shift, 2411 &d_hash_mask, 2412 0); 2413 2414 for (loop = 0; loop < (1 << d_hash_shift); loop++) 2415 INIT_HLIST_HEAD(&dentry_hashtable[loop]); 2416 } 2417 2418 static void __init dcache_init(void) 2419 { 2420 int loop; 2421 2422 /* 2423 * A constructor could be added for stable state like the lists, 2424 * but it is probably not worth it because of the cache nature 2425 * of the dcache. 2426 */ 2427 dentry_cache = KMEM_CACHE(dentry, 2428 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 2429 2430 register_shrinker(&dcache_shrinker); 2431 2432 /* Hash may have been set up in dcache_init_early */ 2433 if (!hashdist) 2434 return; 2435 2436 dentry_hashtable = 2437 alloc_large_system_hash("Dentry cache", 2438 sizeof(struct hlist_head), 2439 dhash_entries, 2440 13, 2441 0, 2442 &d_hash_shift, 2443 &d_hash_mask, 2444 0); 2445 2446 for (loop = 0; loop < (1 << d_hash_shift); loop++) 2447 INIT_HLIST_HEAD(&dentry_hashtable[loop]); 2448 } 2449 2450 /* SLAB cache for __getname() consumers */ 2451 struct kmem_cache *names_cachep __read_mostly; 2452 EXPORT_SYMBOL(names_cachep); 2453 2454 EXPORT_SYMBOL(d_genocide); 2455 2456 void __init vfs_caches_init_early(void) 2457 { 2458 dcache_init_early(); 2459 inode_init_early(); 2460 } 2461 2462 void __init vfs_caches_init(unsigned long mempages) 2463 { 2464 unsigned long reserve; 2465 2466 /* Base hash sizes on available memory, with a reserve equal to 2467 150% of current kernel size */ 2468 2469 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 2470 mempages -= reserve; 2471 2472 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 2473 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2474 2475 dcache_init(); 2476 inode_init(); 2477 files_init(mempages); 2478 mnt_init(); 2479 bdev_cache_init(); 2480 chrdev_init(); 2481 } 2482