/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	hash = hash + (hash >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
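
/*
 * Illustrative sketch of how the code below locates and locks a hash
 * chain: the bucket returned by d_hash() is protected by a bit-spinlock
 * taken through hlist_bl_lock() (cf. __d_shrink() and __d_rehash()):
 *
 *	struct hlist_bl_head *b = d_hash(parent, name->hash);
 *
 *	hlist_bl_lock(b);
 *	... add or remove dentry->d_hash on this chain ...
 *	hlist_bl_unlock(b);
 */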

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_cmp(const unsigned char *cs, size_t scount,
				const unsigned char *ct, size_t tcount)
{
	unsigned long a, b, mask;

	if (unlikely(scount != tcount))
		return 1;

	for (;;) {
		a = load_unaligned_zeropad(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = ~(~0ul << tcount*8);
	return unlikely(!!((a ^ b) & mask));
}

#else

static inline int dentry_cmp(const unsigned char *cs, size_t scount,
				const unsigned char *ct, size_t tcount)
{
	if (scount != tcount)
		return 1;

	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}
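
/*
 * Sketch of the pattern the barrier is used in (see __d_drop() and
 * dentry_unlink_inode() below): make the change while holding d_lock,
 * then bump d_seq so concurrent rcu-walkers notice and fall back to
 * ref-walk:
 *
 *	spin_lock(&dentry->d_lock);
 *	... unhash the dentry or change d_inode ...
 *	dentry_rcuwalk_barrier(dentry);
 *	spin_unlock(&dentry->d_lock);
 */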

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~DCACHE_SHRINK_LIST;
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

/*
 * Remove a dentry with references from the LRU.
 */
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

/*
 * Remove a dentry that is unreferenced and about to be pruned
 * (unhashed and destroyed) from the LRU, and inform the file system.
 * This wrapper should be called _prior_ to unhashing a victim dentry.
 */
static void dentry_lru_prune(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		if (dentry->d_flags & DCACHE_OP_PRUNE)
			dentry->d_op->d_prune(dentry);

		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, list);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, list);
	}
	spin_unlock(&dcache_lru_lock);
}
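
/*
 * Usage sketch for the LRU helpers above: the caller serialises on
 * d_lock, and the helper nests dcache_lru_lock inside it, matching the
 * lock ordering documented at the top of this file:
 *
 *	spin_lock(&dentry->d_lock);
 *	dentry_lru_del(dentry);		(or dentry_lru_add(), etc.)
 *	spin_unlock(&dentry->d_lock);
 */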

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/*
 * Unhash a dentry without inserting an RCU walk barrier or checking that
 * dentry->d_lock is locked. The caller must take care of that, if
 * appropriate.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
	}
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

/*
 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
 * @dentry: dentry to drop
 *
 * This is called when we do a lookup on a placeholder dentry that needed to be
 * looked up. The dentry should have been hashed in order for it to be found by
 * the lookup code, but now needs to be unhashed while we do the actual lookup
 * and clear the DCACHE_NEED_LOOKUP flag.
 */
void d_clear_need_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_clear_need_lookup);
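
/*
 * Sketch of typical d_drop() use from a filesystem ->d_revalidate()
 * method; "example_entry_expired" is a hypothetical fs-specific check,
 * not something defined in this file:
 *
 *	if (example_entry_expired(dentry)) {
 *		d_drop(dentry);		force a fresh lookup next time
 *		return 0;
 *	}
 *	return 1;
 */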

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/*
	 * if dentry was on the d_lru list delete it from there.
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	dentry_lru_prune(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/*
	 * If this dentry needs lookup, don't set the referenced flag so that it
	 * is more likely to be cleaned up by the dcache shrinker in case of
	 * memory pressure.
	 */
	if (!d_need_lookup(dentry))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
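
/*
 * Reference-counting sketch around dput(): every dget() (or other
 * acquired reference, eg. from d_lookup()) is balanced by one dput(),
 * which may sleep when it kills the last reference:
 *
 *	struct dentry *de = dget(dentry);
 *	... use de; it cannot be freed under us ...
 *	dput(de);
 */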
588 */ 589 spin_lock(&dentry->d_lock); 590 if (d_unhashed(dentry)) { 591 spin_unlock(&dentry->d_lock); 592 return 0; 593 } 594 /* 595 * Check whether to do a partial shrink_dcache 596 * to get rid of unused child entries. 597 */ 598 if (!list_empty(&dentry->d_subdirs)) { 599 spin_unlock(&dentry->d_lock); 600 shrink_dcache_parent(dentry); 601 spin_lock(&dentry->d_lock); 602 } 603 604 /* 605 * Somebody else still using it? 606 * 607 * If it's a directory, we can't drop it 608 * for fear of somebody re-populating it 609 * with children (even though dropping it 610 * would make it unreachable from the root, 611 * we might still populate it if it was a 612 * working directory or similar). 613 * We also need to leave mountpoints alone, 614 * directory or not. 615 */ 616 if (dentry->d_count > 1 && dentry->d_inode) { 617 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { 618 spin_unlock(&dentry->d_lock); 619 return -EBUSY; 620 } 621 } 622 623 __d_drop(dentry); 624 spin_unlock(&dentry->d_lock); 625 return 0; 626 } 627 EXPORT_SYMBOL(d_invalidate); 628 629 /* This must be called with d_lock held */ 630 static inline void __dget_dlock(struct dentry *dentry) 631 { 632 dentry->d_count++; 633 } 634 635 static inline void __dget(struct dentry *dentry) 636 { 637 spin_lock(&dentry->d_lock); 638 __dget_dlock(dentry); 639 spin_unlock(&dentry->d_lock); 640 } 641 642 struct dentry *dget_parent(struct dentry *dentry) 643 { 644 struct dentry *ret; 645 646 repeat: 647 /* 648 * Don't need rcu_dereference because we re-check it was correct under 649 * the lock. 650 */ 651 rcu_read_lock(); 652 ret = dentry->d_parent; 653 spin_lock(&ret->d_lock); 654 if (unlikely(ret != dentry->d_parent)) { 655 spin_unlock(&ret->d_lock); 656 rcu_read_unlock(); 657 goto repeat; 658 } 659 rcu_read_unlock(); 660 BUG_ON(!ret->d_count); 661 ret->d_count++; 662 spin_unlock(&ret->d_lock); 663 return ret; 664 } 665 EXPORT_SYMBOL(dget_parent); 666 667 /** 668 * d_find_alias - grab a hashed alias of inode 669 * @inode: inode in question 670 * @want_discon: flag, used by d_splice_alias, to request 671 * that only a DISCONNECTED alias be returned. 672 * 673 * If inode has a hashed alias, or is a directory and has any alias, 674 * acquire the reference to alias and return it. Otherwise return NULL. 675 * Notice that if inode is a directory there can be only one alias and 676 * it can be unhashed only if it has no children, or if it is the root 677 * of a filesystem. 678 * 679 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer 680 * any other hashed alias over that one unless @want_discon is set, 681 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 
682 */ 683 static struct dentry *__d_find_alias(struct inode *inode, int want_discon) 684 { 685 struct dentry *alias, *discon_alias; 686 687 again: 688 discon_alias = NULL; 689 list_for_each_entry(alias, &inode->i_dentry, d_alias) { 690 spin_lock(&alias->d_lock); 691 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 692 if (IS_ROOT(alias) && 693 (alias->d_flags & DCACHE_DISCONNECTED)) { 694 discon_alias = alias; 695 } else if (!want_discon) { 696 __dget_dlock(alias); 697 spin_unlock(&alias->d_lock); 698 return alias; 699 } 700 } 701 spin_unlock(&alias->d_lock); 702 } 703 if (discon_alias) { 704 alias = discon_alias; 705 spin_lock(&alias->d_lock); 706 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 707 if (IS_ROOT(alias) && 708 (alias->d_flags & DCACHE_DISCONNECTED)) { 709 __dget_dlock(alias); 710 spin_unlock(&alias->d_lock); 711 return alias; 712 } 713 } 714 spin_unlock(&alias->d_lock); 715 goto again; 716 } 717 return NULL; 718 } 719 720 struct dentry *d_find_alias(struct inode *inode) 721 { 722 struct dentry *de = NULL; 723 724 if (!list_empty(&inode->i_dentry)) { 725 spin_lock(&inode->i_lock); 726 de = __d_find_alias(inode, 0); 727 spin_unlock(&inode->i_lock); 728 } 729 return de; 730 } 731 EXPORT_SYMBOL(d_find_alias); 732 733 /* 734 * Try to kill dentries associated with this inode. 735 * WARNING: you must own a reference to inode. 736 */ 737 void d_prune_aliases(struct inode *inode) 738 { 739 struct dentry *dentry; 740 restart: 741 spin_lock(&inode->i_lock); 742 list_for_each_entry(dentry, &inode->i_dentry, d_alias) { 743 spin_lock(&dentry->d_lock); 744 if (!dentry->d_count) { 745 __dget_dlock(dentry); 746 __d_drop(dentry); 747 spin_unlock(&dentry->d_lock); 748 spin_unlock(&inode->i_lock); 749 dput(dentry); 750 goto restart; 751 } 752 spin_unlock(&dentry->d_lock); 753 } 754 spin_unlock(&inode->i_lock); 755 } 756 EXPORT_SYMBOL(d_prune_aliases); 757 758 /* 759 * Try to throw away a dentry - free the inode, dput the parent. 760 * Requires dentry->d_lock is held, and dentry->d_count == 0. 761 * Releases dentry->d_lock. 762 * 763 * This may fail if locks cannot be acquired no problem, just try again. 764 */ 765 static void try_prune_one_dentry(struct dentry *dentry) 766 __releases(dentry->d_lock) 767 { 768 struct dentry *parent; 769 770 parent = dentry_kill(dentry, 0); 771 /* 772 * If dentry_kill returns NULL, we have nothing more to do. 773 * if it returns the same dentry, trylocks failed. In either 774 * case, just loop again. 775 * 776 * Otherwise, we need to prune ancestors too. This is necessary 777 * to prevent quadratic behavior of shrink_dcache_parent(), but 778 * is also expected to be beneficial in reducing dentry cache 779 * fragmentation. 780 */ 781 if (!parent) 782 return; 783 if (parent == dentry) 784 return; 785 786 /* Prune ancestors. 
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @count: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @count entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
void prune_dcache_sb(struct super_block *sb, int count)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			dentry->d_flags |= DCACHE_SHRINK_LIST;
			spin_unlock(&dentry->d_lock);
			if (!--count)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);
}
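
/*
 * For orientation (sketch): the per-superblock shrinker (prune_super()
 * in fs/super.c) is the expected caller here, handing this function its
 * share of the scan target under memory pressure, roughly:
 *
 *	prune_dcache_sb(sb, dentries_to_scan);
 *
 * where "dentries_to_scan" is an illustrative name for the shrinker's
 * per-call batch.
 */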
891 */ 892 void shrink_dcache_sb(struct super_block *sb) 893 { 894 LIST_HEAD(tmp); 895 896 spin_lock(&dcache_lru_lock); 897 while (!list_empty(&sb->s_dentry_lru)) { 898 list_splice_init(&sb->s_dentry_lru, &tmp); 899 spin_unlock(&dcache_lru_lock); 900 shrink_dentry_list(&tmp); 901 spin_lock(&dcache_lru_lock); 902 } 903 spin_unlock(&dcache_lru_lock); 904 } 905 EXPORT_SYMBOL(shrink_dcache_sb); 906 907 /* 908 * destroy a single subtree of dentries for unmount 909 * - see the comments on shrink_dcache_for_umount() for a description of the 910 * locking 911 */ 912 static void shrink_dcache_for_umount_subtree(struct dentry *dentry) 913 { 914 struct dentry *parent; 915 916 BUG_ON(!IS_ROOT(dentry)); 917 918 for (;;) { 919 /* descend to the first leaf in the current subtree */ 920 while (!list_empty(&dentry->d_subdirs)) 921 dentry = list_entry(dentry->d_subdirs.next, 922 struct dentry, d_u.d_child); 923 924 /* consume the dentries from this leaf up through its parents 925 * until we find one with children or run out altogether */ 926 do { 927 struct inode *inode; 928 929 /* 930 * remove the dentry from the lru, and inform 931 * the fs that this dentry is about to be 932 * unhashed and destroyed. 933 */ 934 dentry_lru_prune(dentry); 935 __d_shrink(dentry); 936 937 if (dentry->d_count != 0) { 938 printk(KERN_ERR 939 "BUG: Dentry %p{i=%lx,n=%s}" 940 " still in use (%d)" 941 " [unmount of %s %s]\n", 942 dentry, 943 dentry->d_inode ? 944 dentry->d_inode->i_ino : 0UL, 945 dentry->d_name.name, 946 dentry->d_count, 947 dentry->d_sb->s_type->name, 948 dentry->d_sb->s_id); 949 BUG(); 950 } 951 952 if (IS_ROOT(dentry)) { 953 parent = NULL; 954 list_del(&dentry->d_u.d_child); 955 } else { 956 parent = dentry->d_parent; 957 parent->d_count--; 958 list_del(&dentry->d_u.d_child); 959 } 960 961 inode = dentry->d_inode; 962 if (inode) { 963 dentry->d_inode = NULL; 964 list_del_init(&dentry->d_alias); 965 if (dentry->d_op && dentry->d_op->d_iput) 966 dentry->d_op->d_iput(dentry, inode); 967 else 968 iput(inode); 969 } 970 971 d_free(dentry); 972 973 /* finished when we fall off the top of the tree, 974 * otherwise we ascend to the parent and move to the 975 * next sibling if there is one */ 976 if (!parent) 977 return; 978 dentry = parent; 979 } while (list_empty(&dentry->d_subdirs)); 980 981 dentry = list_entry(dentry->d_subdirs.next, 982 struct dentry, d_u.d_child); 983 } 984 } 985 986 /* 987 * destroy the dentries attached to a superblock on unmounting 988 * - we don't need to use dentry->d_lock because: 989 * - the superblock is detached from all mountings and open files, so the 990 * dentry trees will not be rearranged by the VFS 991 * - s_umount is write-locked, so the memory pressure shrinker will ignore 992 * any dentries belonging to this superblock that it comes across 993 * - the filesystem itself is no longer permitted to rearrange the dentries 994 * in this superblock 995 */ 996 void shrink_dcache_for_umount(struct super_block *sb) 997 { 998 struct dentry *dentry; 999 1000 if (down_read_trylock(&sb->s_umount)) 1001 BUG(); 1002 1003 dentry = sb->s_root; 1004 sb->s_root = NULL; 1005 dentry->d_count--; 1006 shrink_dcache_for_umount_subtree(dentry); 1007 1008 while (!hlist_bl_empty(&sb->s_anon)) { 1009 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash); 1010 shrink_dcache_for_umount_subtree(dentry); 1011 } 1012 } 1013 1014 /* 1015 * This tries to ascend one level of parenthood, but 1016 * we can race with renaming, so we need to re-check 1017 * the parenthood after dropping the lock 

/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
		 (old->d_flags & DCACHE_DISCONNECTED) ||
		 (!locked && read_seqretry(&rename_lock, seq))) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}


/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);
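
/*
 * The rename_lock idiom used by have_submounts() and select_parent(),
 * reduced to a sketch: try the walk under the read seqlock, and only
 * take the write side (which excludes renames) if a rename raced with
 * the lockless walk:
 *
 *	seq = read_seqbegin(&rename_lock);
 * again:
 *	... tree walk, taking only d_lock ...
 *	if (!locked && read_seqretry(&rename_lock, seq)) {
 *		locked = 1;
 *		write_seqlock(&rename_lock);
 *		goto again;
 *	}
 *	if (locked)
 *		write_sequnlock(&rename_lock);
 */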

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry *parent, struct list_head *dispose)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the dispose list.
		 *
		 * Those which are presently on the shrink list, being processed
		 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
		 * loop in shrink_dcache_parent() might not make any progress
		 * and loop forever.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
			dentry_lru_move_list(dentry, dispose);
			dentry->d_flags |= DCACHE_SHRINK_LIST;
			found++;
		}
		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	LIST_HEAD(dispose);
	int found;

	while ((found = select_parent(parent, &dispose)) != 0)
		shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_DELETE));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
}
EXPORT_SYMBOL(d_set_d_op);
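
/*
 * Sketch of how a filesystem wires up the DCACHE_OP_* flags above;
 * "example_d_revalidate" and "example_d_delete" are hypothetical fs
 * methods, not defined in this file:
 *
 *	static const struct dentry_operations example_dentry_ops = {
 *		.d_revalidate	= example_d_revalidate,
 *		.d_delete	= example_d_delete,
 *	};
 *
 *	d_set_d_op(dentry, &example_dentry_ops);
 *
 * Alternatively a filesystem sets sb->s_d_op once at mount time, and
 * __d_alloc() above applies it to every new dentry automatically.
 */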

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		list_add(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
EXPORT_SYMBOL(d_instantiate_unique);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (list_empty(&inode->i_dentry))
		return NULL;
	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
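
/*
 * Illustrative use of d_find_any_alias() (sketch): the returned dentry,
 * if any, carries a reference that the caller must drop:
 *
 *	struct dentry *de = d_find_any_alias(inode);
 *	if (de) {
 *		... use de ...
 *		dput(de);
 *	}
 */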

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode: the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode: the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name: the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
1659 * 1660 * If no entry exists with the exact case name, allocate new dentry with 1661 * the exact case, and return the spliced entry. 1662 */ 1663 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, 1664 struct qstr *name) 1665 { 1666 int error; 1667 struct dentry *found; 1668 struct dentry *new; 1669 1670 /* 1671 * First check if a dentry matching the name already exists, 1672 * if not go ahead and create it now. 1673 */ 1674 found = d_hash_and_lookup(dentry->d_parent, name); 1675 if (!found) { 1676 new = d_alloc(dentry->d_parent, name); 1677 if (!new) { 1678 error = -ENOMEM; 1679 goto err_out; 1680 } 1681 1682 found = d_splice_alias(inode, new); 1683 if (found) { 1684 dput(new); 1685 return found; 1686 } 1687 return new; 1688 } 1689 1690 /* 1691 * If a matching dentry exists, and it's not negative use it. 1692 * 1693 * Decrement the reference count to balance the iget() done 1694 * earlier on. 1695 */ 1696 if (found->d_inode) { 1697 if (unlikely(found->d_inode != inode)) { 1698 /* This can't happen because bad inodes are unhashed. */ 1699 BUG_ON(!is_bad_inode(inode)); 1700 BUG_ON(!is_bad_inode(found->d_inode)); 1701 } 1702 iput(inode); 1703 return found; 1704 } 1705 1706 /* 1707 * We are going to instantiate this dentry, unhash it and clear the 1708 * lookup flag so we can do that. 1709 */ 1710 if (unlikely(d_need_lookup(found))) 1711 d_clear_need_lookup(found); 1712 1713 /* 1714 * Negative dentry: instantiate it unless the inode is a directory and 1715 * already has a dentry. 1716 */ 1717 new = d_splice_alias(inode, found); 1718 if (new) { 1719 dput(found); 1720 found = new; 1721 } 1722 return found; 1723 1724 err_out: 1725 iput(inode); 1726 return ERR_PTR(error); 1727 } 1728 EXPORT_SYMBOL(d_add_ci); 1729 1730 /** 1731 * __d_lookup_rcu - search for a dentry (racy, store-free) 1732 * @parent: parent dentry 1733 * @name: qstr of name we wish to find 1734 * @seqp: returns d_seq value at the point where the dentry was found 1735 * @inode: returns dentry->d_inode when the inode was found valid. 1736 * Returns: dentry, or NULL 1737 * 1738 * __d_lookup_rcu is the dcache lookup function for rcu-walk name 1739 * resolution (store-free path walking) design described in 1740 * Documentation/filesystems/path-lookup.txt. 1741 * 1742 * This is not to be used outside core vfs. 1743 * 1744 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock 1745 * held, and rcu_read_lock held. The returned dentry must not be stored into 1746 * without taking d_lock and checking d_seq sequence count against @seq 1747 * returned here. 1748 * 1749 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount 1750 * function. 1751 * 1752 * Alternatively, __d_lookup_rcu may be called again to look up the child of 1753 * the returned dentry, so long as its parent's seqlock is checked after the 1754 * child is looked up. Thus, an interlocking stepping of sequence lock checks 1755 * is formed, giving integrity down the path walk. 

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * @inode: returns dentry->d_inode when the inode was found valid.
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp, struct inode **inode)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;
		struct inode *i;
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

seqretry:
		seq = read_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		i = dentry->d_inode;
		prefetch(tname);
		/*
		 * This seqcount check is required to ensure name and
		 * len are loaded atomically, so as not to walk off the
		 * edge of memory when walking. If we could load this
		 * atomically some other way, we could drop this check.
		 */
		if (read_seqcount_retry(&dentry->d_seq, seq))
			goto seqretry;
		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (parent->d_op->d_compare(parent, *inode,
						dentry, i,
						tlen, tname, name))
				continue;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				continue;
		}
		/*
		 * No extra seqcount check is required after the name
		 * compare. The caller must perform a seqcount check in
		 * order to do anything useful with the returned dentry
		 * anyway.
		 */
		*seqp = seq;
		*inode = i;
		return dentry;
	}
	return NULL;
}
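
/*
 * Sketch of the validation an rcu-walk caller performs (cf. the lookup
 * path in fs/namei.c): nothing may be done with the result until the
 * returned @seq has been confirmed:
 *
 *	dentry = __d_lookup_rcu(parent, name, &seq, &inode);
 *	if (dentry) {
 *		... read what we need from dentry ...
 *		if (read_seqcount_retry(&dentry->d_seq, seq))
 *			... bail out and fall back to ref-walk ...
 *	}
 */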

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);

/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			if (parent->d_op->d_compare(parent, parent->d_inode,
						dentry, dentry->d_inode,
						tlen, tname, name))
				goto next;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				goto next;
		}

		dentry->d_count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}

/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function.
Note that we must 1964 * calculate the standard hash first, as the d_op->d_hash() 1965 * routine may choose to leave the hash value unchanged. 1966 */ 1967 name->hash = full_name_hash(name->name, name->len); 1968 if (dir->d_flags & DCACHE_OP_HASH) { 1969 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0) 1970 goto out; 1971 } 1972 dentry = d_lookup(dir, name); 1973 out: 1974 return dentry; 1975 } 1976 1977 /** 1978 * d_validate - verify dentry provided from insecure source (deprecated) 1979 * @dentry: The dentry alleged to be valid child of @dparent 1980 * @dparent: The parent dentry (known to be valid) 1981 * 1982 * An insecure source has sent us a dentry, here we verify it and dget() it. 1983 * This is used by ncpfs in its readdir implementation. 1984 * Zero is returned if the dentry is invalid. 1985 * 1986 * This function is slow for big directories, and deprecated; do not use it. 1987 */ 1988 int d_validate(struct dentry *dentry, struct dentry *dparent) 1989 { 1990 struct dentry *child; 1991 1992 spin_lock(&dparent->d_lock); 1993 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { 1994 if (dentry == child) { 1995 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1996 __dget_dlock(dentry); 1997 spin_unlock(&dentry->d_lock); 1998 spin_unlock(&dparent->d_lock); 1999 return 1; 2000 } 2001 } 2002 spin_unlock(&dparent->d_lock); 2003 2004 return 0; 2005 } 2006 EXPORT_SYMBOL(d_validate); 2007 2008 /* 2009 * When a file is deleted, we have two options: 2010 * - turn this dentry into a negative dentry 2011 * - unhash this dentry and free it. 2012 * 2013 * Usually, we want to just turn this into 2014 * a negative dentry, but if anybody else is 2015 * currently using the dentry or the inode 2016 * we can't do that and we fall back on removing 2017 * it from the hash queues and waiting for 2018 * it to be deleted later when it has no users. 2019 */ 2020 2021 /** 2022 * d_delete - delete a dentry 2023 * @dentry: The dentry to delete 2024 * 2025 * Turn the dentry into a negative dentry if possible, otherwise 2026 * remove it from the hash queues so it can be deleted later. 2027 */ 2028 2029 void d_delete(struct dentry * dentry) 2030 { 2031 struct inode *inode; 2032 int isdir = 0; 2033 /* 2034 * Are we the only user? 2035 */ 2036 again: 2037 spin_lock(&dentry->d_lock); 2038 inode = dentry->d_inode; 2039 isdir = S_ISDIR(inode->i_mode); 2040 if (dentry->d_count == 1) { 2041 if (inode && !spin_trylock(&inode->i_lock)) { 2042 spin_unlock(&dentry->d_lock); 2043 cpu_relax(); 2044 goto again; 2045 } 2046 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2047 dentry_unlink_inode(dentry); 2048 fsnotify_nameremove(dentry, isdir); 2049 return; 2050 } 2051 2052 if (!d_unhashed(dentry)) 2053 __d_drop(dentry); 2054 2055 spin_unlock(&dentry->d_lock); 2056 2057 fsnotify_nameremove(dentry, isdir); 2058 } 2059 EXPORT_SYMBOL(d_delete); 2060 2061 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2062 { 2063 BUG_ON(!d_unhashed(entry)); 2064 hlist_bl_lock(b); 2065 entry->d_flags |= DCACHE_RCUACCESS; 2066 hlist_bl_add_head_rcu(&entry->d_hash, b); 2067 hlist_bl_unlock(b); 2068 } 2069 2070 static void _d_rehash(struct dentry * entry) 2071 { 2072 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2073 } 2074 2075 /** 2076 * d_rehash - add an entry back to the hash 2077 * @entry: dentry to add to the hash 2078 * 2079 * Adds a dentry to the hash according to its name.
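 *
 * d_add() is the canonical caller; a minimal sketch of that pattern,
 * for an unhashed dentry whose inode reference is being consumed:
 *
 *	d_instantiate(dentry, inode);
 *	d_rehash(dentry);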
2080 */ 2081 2082 void d_rehash(struct dentry * entry) 2083 { 2084 spin_lock(&entry->d_lock); 2085 _d_rehash(entry); 2086 spin_unlock(&entry->d_lock); 2087 } 2088 EXPORT_SYMBOL(d_rehash); 2089 2090 /** 2091 * dentry_update_name_case - update case insensitive dentry with a new name 2092 * @dentry: dentry to be updated 2093 * @name: new name 2094 * 2095 * Update a case insensitive dentry with new case of name. 2096 * 2097 * dentry must have been returned by d_lookup with name @name. Old and new 2098 * name lengths must match (i.e. no d_compare which allows mismatched name 2099 * lengths). 2100 * 2101 * Parent inode i_mutex must be held over d_lookup and into this call (to 2102 * keep renames, concurrent inserts, and readdir(2) away). 2103 */ 2104 void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2105 { 2106 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); 2107 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2108 2109 spin_lock(&dentry->d_lock); 2110 write_seqcount_begin(&dentry->d_seq); 2111 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); 2112 write_seqcount_end(&dentry->d_seq); 2113 spin_unlock(&dentry->d_lock); 2114 } 2115 EXPORT_SYMBOL(dentry_update_name_case); 2116 2117 static void switch_names(struct dentry *dentry, struct dentry *target) 2118 { 2119 if (dname_external(target)) { 2120 if (dname_external(dentry)) { 2121 /* 2122 * Both external: swap the pointers 2123 */ 2124 swap(target->d_name.name, dentry->d_name.name); 2125 } else { 2126 /* 2127 * dentry:internal, target:external. Steal target's 2128 * storage and make target internal. 2129 */ 2130 memcpy(target->d_iname, dentry->d_name.name, 2131 dentry->d_name.len + 1); 2132 dentry->d_name.name = target->d_name.name; 2133 target->d_name.name = target->d_iname; 2134 } 2135 } else { 2136 if (dname_external(dentry)) { 2137 /* 2138 * dentry:external, target:internal. Give dentry's 2139 * storage to target and make dentry internal 2140 */ 2141 memcpy(dentry->d_iname, target->d_name.name, 2142 target->d_name.len + 1); 2143 target->d_name.name = dentry->d_name.name; 2144 dentry->d_name.name = dentry->d_iname; 2145 } else { 2146 /* 2147 * Both are internal. Just copy target to dentry 2148 */ 2149 memcpy(dentry->d_iname, target->d_name.name, 2150 target->d_name.len + 1); 2151 dentry->d_name.len = target->d_name.len; 2152 return; 2153 } 2154 } 2155 swap(dentry->d_name.len, target->d_name.len); 2156 } 2157 2158 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) 2159 { 2160 /* 2161 * XXXX: do we really need to take target->d_lock?
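 *
 * Lock ordering below follows the usual dcache rules: if one parent is
 * an ancestor of the other, the ancestor's d_lock is taken first;
 * otherwise both parents are locked before the two dentries themselves,
 * which are then taken in address order. The explicit lockdep
 * subclasses 2 and 3 account for up to four d_locks being held at once.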
2162 */ 2163 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) 2164 spin_lock(&target->d_parent->d_lock); 2165 else { 2166 if (d_ancestor(dentry->d_parent, target->d_parent)) { 2167 spin_lock(&dentry->d_parent->d_lock); 2168 spin_lock_nested(&target->d_parent->d_lock, 2169 DENTRY_D_LOCK_NESTED); 2170 } else { 2171 spin_lock(&target->d_parent->d_lock); 2172 spin_lock_nested(&dentry->d_parent->d_lock, 2173 DENTRY_D_LOCK_NESTED); 2174 } 2175 } 2176 if (target < dentry) { 2177 spin_lock_nested(&target->d_lock, 2); 2178 spin_lock_nested(&dentry->d_lock, 3); 2179 } else { 2180 spin_lock_nested(&dentry->d_lock, 2); 2181 spin_lock_nested(&target->d_lock, 3); 2182 } 2183 } 2184 2185 static void dentry_unlock_parents_for_move(struct dentry *dentry, 2186 struct dentry *target) 2187 { 2188 if (target->d_parent != dentry->d_parent) 2189 spin_unlock(&dentry->d_parent->d_lock); 2190 if (target->d_parent != target) 2191 spin_unlock(&target->d_parent->d_lock); 2192 } 2193 2194 /* 2195 * When switching names, the actual string doesn't strictly have to 2196 * be preserved in the target - because we're dropping the target 2197 * anyway. As such, we can just do a simple memcpy() to copy over 2198 * the new name before we switch. 2199 * 2200 * Note that we have to be a lot more careful about getting the hash 2201 * switched - we have to switch the hash value properly even if it 2202 * then no longer matches the actual (corrupted) string of the target. 2203 * The hash value has to match the hash queue that the dentry is on.. 2204 */ 2205 /* 2206 * __d_move - move a dentry 2207 * @dentry: entry to move 2208 * @target: new dentry 2209 * 2210 * Update the dcache to reflect the move of a file name. Negative 2211 * dcache entries should not be moved in this way. Caller must hold 2212 * rename_lock, the i_mutex of the source and target directories, 2213 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). 2214 */ 2215 static void __d_move(struct dentry * dentry, struct dentry * target) 2216 { 2217 if (!dentry->d_inode) 2218 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2219 2220 BUG_ON(d_ancestor(dentry, target)); 2221 BUG_ON(d_ancestor(target, dentry)); 2222 2223 dentry_lock_for_move(dentry, target); 2224 2225 write_seqcount_begin(&dentry->d_seq); 2226 write_seqcount_begin(&target->d_seq); 2227 2228 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ 2229 2230 /* 2231 * Move the dentry to the target hash queue. Don't bother checking 2232 * for the same hash queue because of how unlikely it is. 2233 */ 2234 __d_drop(dentry); 2235 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); 2236 2237 /* Unhash the target: dput() will then get rid of it */ 2238 __d_drop(target); 2239 2240 list_del(&dentry->d_u.d_child); 2241 list_del(&target->d_u.d_child); 2242 2243 /* Switch the names.. */ 2244 switch_names(dentry, target); 2245 swap(dentry->d_name.hash, target->d_name.hash); 2246 2247 /* ... 
and switch the parents */ 2248 if (IS_ROOT(dentry)) { 2249 dentry->d_parent = target->d_parent; 2250 target->d_parent = target; 2251 INIT_LIST_HEAD(&target->d_u.d_child); 2252 } else { 2253 swap(dentry->d_parent, target->d_parent); 2254 2255 /* And add them back to the (new) parent lists */ 2256 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); 2257 } 2258 2259 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2260 2261 write_seqcount_end(&target->d_seq); 2262 write_seqcount_end(&dentry->d_seq); 2263 2264 dentry_unlock_parents_for_move(dentry, target); 2265 spin_unlock(&target->d_lock); 2266 fsnotify_d_move(dentry); 2267 spin_unlock(&dentry->d_lock); 2268 } 2269 2270 /* 2271 * d_move - move a dentry 2272 * @dentry: entry to move 2273 * @target: new dentry 2274 * 2275 * Update the dcache to reflect the move of a file name. Negative 2276 * dcache entries should not be moved in this way. See the locking 2277 * requirements for __d_move. 2278 */ 2279 void d_move(struct dentry *dentry, struct dentry *target) 2280 { 2281 write_seqlock(&rename_lock); 2282 __d_move(dentry, target); 2283 write_sequnlock(&rename_lock); 2284 } 2285 EXPORT_SYMBOL(d_move); 2286 2287 /** 2288 * d_ancestor - search for an ancestor 2289 * @p1: ancestor dentry 2290 * @p2: child dentry 2291 * 2292 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2293 * an ancestor of p2, else NULL. 2294 */ 2295 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2296 { 2297 struct dentry *p; 2298 2299 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2300 if (p->d_parent == p1) 2301 return p; 2302 } 2303 return NULL; 2304 } 2305 2306 /* 2307 * This helper attempts to cope with remotely renamed directories 2308 * 2309 * It assumes that the caller is already holding 2310 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2311 * 2312 * Note: If ever the locking in lock_rename() changes, then please 2313 * remember to update this too... 2314 */ 2315 static struct dentry *__d_unalias(struct inode *inode, 2316 struct dentry *dentry, struct dentry *alias) 2317 { 2318 struct mutex *m1 = NULL, *m2 = NULL; 2319 struct dentry *ret; 2320 2321 /* If alias and dentry share a parent, then no extra locks required */ 2322 if (alias->d_parent == dentry->d_parent) 2323 goto out_unalias; 2324 2325 /* See lock_rename() */ 2326 ret = ERR_PTR(-EBUSY); 2327 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) 2328 goto out_err; 2329 m1 = &dentry->d_sb->s_vfs_rename_mutex; 2330 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) 2331 goto out_err; 2332 m2 = &alias->d_parent->d_inode->i_mutex; 2333 out_unalias: 2334 __d_move(alias, dentry); 2335 ret = alias; 2336 out_err: 2337 spin_unlock(&inode->i_lock); 2338 if (m2) 2339 mutex_unlock(m2); 2340 if (m1) 2341 mutex_unlock(m1); 2342 return ret; 2343 } 2344 2345 /* 2346 * Prepare an anonymous dentry for life in the superblock's dentry tree as a 2347 * named dentry in place of the dentry to be replaced. 2348 * returns with anon->d_lock held! 2349 */ 2350 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) 2351 { 2352 struct dentry *dparent, *aparent; 2353 2354 dentry_lock_for_move(anon, dentry); 2355 2356 write_seqcount_begin(&dentry->d_seq); 2357 write_seqcount_begin(&anon->d_seq); 2358 2359 dparent = dentry->d_parent; 2360 aparent = anon->d_parent; 2361 2362 switch_names(dentry, anon); 2363 swap(dentry->d_name.hash, anon->d_name.hash); 2364 2365 dentry->d_parent = (aparent == anon) ? 
dentry : aparent; 2366 list_del(&dentry->d_u.d_child); 2367 if (!IS_ROOT(dentry)) 2368 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2369 else 2370 INIT_LIST_HEAD(&dentry->d_u.d_child); 2371 2372 anon->d_parent = (dparent == dentry) ? anon : dparent; 2373 list_del(&anon->d_u.d_child); 2374 if (!IS_ROOT(anon)) 2375 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); 2376 else 2377 INIT_LIST_HEAD(&anon->d_u.d_child); 2378 2379 write_seqcount_end(&dentry->d_seq); 2380 write_seqcount_end(&anon->d_seq); 2381 2382 dentry_unlock_parents_for_move(anon, dentry); 2383 spin_unlock(&dentry->d_lock); 2384 2385 /* anon->d_lock still locked, returns locked */ 2386 anon->d_flags &= ~DCACHE_DISCONNECTED; 2387 } 2388 2389 /** 2390 * d_materialise_unique - introduce an inode into the tree 2391 * @dentry: candidate dentry 2392 * @inode: inode to bind to the dentry, to which aliases may be attached 2393 * 2394 * Introduces a dentry into the tree, substituting an extant disconnected 2395 * root directory alias in its place if there is one. Caller must hold the 2396 * i_mutex of the parent directory. 2397 */ 2398 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) 2399 { 2400 struct dentry *actual; 2401 2402 BUG_ON(!d_unhashed(dentry)); 2403 2404 if (!inode) { 2405 actual = dentry; 2406 __d_instantiate(dentry, NULL); 2407 d_rehash(actual); 2408 goto out_nolock; 2409 } 2410 2411 spin_lock(&inode->i_lock); 2412 2413 if (S_ISDIR(inode->i_mode)) { 2414 struct dentry *alias; 2415 2416 /* Does an aliased dentry already exist? */ 2417 alias = __d_find_alias(inode, 0); 2418 if (alias) { 2419 actual = alias; 2420 write_seqlock(&rename_lock); 2421 2422 if (d_ancestor(alias, dentry)) { 2423 /* Check for loops */ 2424 actual = ERR_PTR(-ELOOP); 2425 spin_unlock(&inode->i_lock); 2426 } else if (IS_ROOT(alias)) { 2427 /* Is this an anonymous mountpoint that we 2428 * could splice into our tree? */ 2429 __d_materialise_dentry(dentry, alias); 2430 write_sequnlock(&rename_lock); 2431 __d_drop(alias); 2432 goto found; 2433 } else { 2434 /* Nope, but we must(!) avoid directory 2435 * aliasing.
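Rename the existing alias into dentry's place instead, so the directory keeps a single connected name.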
This drops inode->i_lock */ 2436 actual = __d_unalias(inode, dentry, alias); 2437 } 2438 write_sequnlock(&rename_lock); 2439 if (IS_ERR(actual)) { 2440 if (PTR_ERR(actual) == -ELOOP) 2441 pr_warn_ratelimited( 2442 "VFS: Lookup of '%s' in %s %s" 2443 " would have caused loop\n", 2444 dentry->d_name.name, 2445 inode->i_sb->s_type->name, 2446 inode->i_sb->s_id); 2447 dput(alias); 2448 } 2449 goto out_nolock; 2450 } 2451 } 2452 2453 /* Add a unique reference */ 2454 actual = __d_instantiate_unique(dentry, inode); 2455 if (!actual) 2456 actual = dentry; 2457 else 2458 BUG_ON(!d_unhashed(actual)); 2459 2460 spin_lock(&actual->d_lock); 2461 found: 2462 _d_rehash(actual); 2463 spin_unlock(&actual->d_lock); 2464 spin_unlock(&inode->i_lock); 2465 out_nolock: 2466 if (actual == dentry) { 2467 security_d_instantiate(dentry, inode); 2468 return NULL; 2469 } 2470 2471 iput(inode); 2472 return actual; 2473 } 2474 EXPORT_SYMBOL_GPL(d_materialise_unique); 2475 2476 static int prepend(char **buffer, int *buflen, const char *str, int namelen) 2477 { 2478 *buflen -= namelen; 2479 if (*buflen < 0) 2480 return -ENAMETOOLONG; 2481 *buffer -= namelen; 2482 memcpy(*buffer, str, namelen); 2483 return 0; 2484 } 2485 2486 static int prepend_name(char **buffer, int *buflen, struct qstr *name) 2487 { 2488 return prepend(buffer, buflen, name->name, name->len); 2489 } 2490 2491 /** 2492 * prepend_path - Prepend path string to a buffer 2493 * @path: the dentry/vfsmount to report 2494 * @root: root vfsmnt/dentry 2495 * @buffer: pointer to the end of the buffer 2496 * @buflen: pointer to buffer length 2497 * 2498 * Caller holds the rename_lock. 2499 */ 2500 static int prepend_path(const struct path *path, 2501 const struct path *root, 2502 char **buffer, int *buflen) 2503 { 2504 struct dentry *dentry = path->dentry; 2505 struct vfsmount *vfsmnt = path->mnt; 2506 struct mount *mnt = real_mount(vfsmnt); 2507 bool slash = false; 2508 int error = 0; 2509 2510 br_read_lock(vfsmount_lock); 2511 while (dentry != root->dentry || vfsmnt != root->mnt) { 2512 struct dentry * parent; 2513 2514 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 2515 /* Global root? */ 2516 if (!mnt_has_parent(mnt)) 2517 goto global_root; 2518 dentry = mnt->mnt_mountpoint; 2519 mnt = mnt->mnt_parent; 2520 vfsmnt = &mnt->mnt; 2521 continue; 2522 } 2523 parent = dentry->d_parent; 2524 prefetch(parent); 2525 spin_lock(&dentry->d_lock); 2526 error = prepend_name(buffer, buflen, &dentry->d_name); 2527 spin_unlock(&dentry->d_lock); 2528 if (!error) 2529 error = prepend(buffer, buflen, "/", 1); 2530 if (error) 2531 break; 2532 2533 slash = true; 2534 dentry = parent; 2535 } 2536 2537 if (!error && !slash) 2538 error = prepend(buffer, buflen, "/", 1); 2539 2540 out: 2541 br_read_unlock(vfsmount_lock); 2542 return error; 2543 2544 global_root: 2545 /* 2546 * Filesystems needing to implement special "root names" 2547 * should do so with ->d_dname() 2548 */ 2549 if (IS_ROOT(dentry) && 2550 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) { 2551 WARN(1, "Root dentry has weird name <%.*s>\n", 2552 (int) dentry->d_name.len, dentry->d_name.name); 2553 } 2554 if (!slash) 2555 error = prepend(buffer, buflen, "/", 1); 2556 if (!error) 2557 error = real_mount(vfsmnt)->mnt_ns ? 
1 : 2; 2558 goto out; 2559 } 2560 2561 /** 2562 * __d_path - return the path of a dentry 2563 * @path: the dentry/vfsmount to report 2564 * @root: root vfsmnt/dentry 2565 * @buf: buffer to return value in 2566 * @buflen: buffer length 2567 * 2568 * Convert a dentry into an ASCII path name. 2569 * 2570 * Returns a pointer into the buffer or an error code if the 2571 * path was too long. 2572 * 2573 * "buflen" should be positive. 2574 * 2575 * If the path is not reachable from the supplied root, return %NULL. 2576 */ 2577 char *__d_path(const struct path *path, 2578 const struct path *root, 2579 char *buf, int buflen) 2580 { 2581 char *res = buf + buflen; 2582 int error; 2583 2584 prepend(&res, &buflen, "\0", 1); 2585 write_seqlock(&rename_lock); 2586 error = prepend_path(path, root, &res, &buflen); 2587 write_sequnlock(&rename_lock); 2588 2589 if (error < 0) 2590 return ERR_PTR(error); 2591 if (error > 0) 2592 return NULL; 2593 return res; 2594 } 2595 2596 char *d_absolute_path(const struct path *path, 2597 char *buf, int buflen) 2598 { 2599 struct path root = {}; 2600 char *res = buf + buflen; 2601 int error; 2602 2603 prepend(&res, &buflen, "\0", 1); 2604 write_seqlock(&rename_lock); 2605 error = prepend_path(path, &root, &res, &buflen); 2606 write_sequnlock(&rename_lock); 2607 2608 if (error > 1) 2609 error = -EINVAL; 2610 if (error < 0) 2611 return ERR_PTR(error); 2612 return res; 2613 } 2614 2615 /* 2616 * same as __d_path but appends "(deleted)" for unlinked files. 2617 */ 2618 static int path_with_deleted(const struct path *path, 2619 const struct path *root, 2620 char **buf, int *buflen) 2621 { 2622 prepend(buf, buflen, "\0", 1); 2623 if (d_unlinked(path->dentry)) { 2624 int error = prepend(buf, buflen, " (deleted)", 10); 2625 if (error) 2626 return error; 2627 } 2628 2629 return prepend_path(path, root, buf, buflen); 2630 } 2631 2632 static int prepend_unreachable(char **buffer, int *buflen) 2633 { 2634 return prepend(buffer, buflen, "(unreachable)", 13); 2635 } 2636 2637 /** 2638 * d_path - return the path of a dentry 2639 * @path: path to report 2640 * @buf: buffer to return value in 2641 * @buflen: buffer length 2642 * 2643 * Convert a dentry into an ASCII path name. If the entry has been deleted 2644 * the string " (deleted)" is appended. Note that this is ambiguous. 2645 * 2646 * Returns a pointer into the buffer or an error code if the path was 2647 * too long. Note: Callers should use the returned pointer, not the passed 2648 * in buffer, to use the name! The implementation often starts at an offset 2649 * into the buffer, and may leave 0 bytes at the start. 2650 * 2651 * "buflen" should be positive. 2652 */ 2653 char *d_path(const struct path *path, char *buf, int buflen) 2654 { 2655 char *res = buf + buflen; 2656 struct path root; 2657 int error; 2658 2659 /* 2660 * We have various synthetic filesystems that never get mounted. On 2661 * these filesystems dentries are never used for lookup purposes, and 2662 * thus don't need to be hashed. They also don't need a name until a 2663 * user wants to identify the object in /proc/pid/fd/. 
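(pipefs and sockfs are the classic examples; they provide ->d_dname() for precisely this purpose.)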
The little hack 2664 * below allows us to generate a name for these objects on demand: 2665 */ 2666 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2667 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2668 2669 get_fs_root(current->fs, &root); 2670 write_seqlock(&rename_lock); 2671 error = path_with_deleted(path, &root, &res, &buflen); 2672 if (error < 0) 2673 res = ERR_PTR(error); 2674 write_sequnlock(&rename_lock); 2675 path_put(&root); 2676 return res; 2677 } 2678 EXPORT_SYMBOL(d_path); 2679 2680 /** 2681 * d_path_with_unreachable - return the path of a dentry 2682 * @path: path to report 2683 * @buf: buffer to return value in 2684 * @buflen: buffer length 2685 * 2686 * The difference from d_path() is that this prepends "(unreachable)" 2687 * to paths which are unreachable from the current process' root. 2688 */ 2689 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) 2690 { 2691 char *res = buf + buflen; 2692 struct path root; 2693 int error; 2694 2695 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2696 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2697 2698 get_fs_root(current->fs, &root); 2699 write_seqlock(&rename_lock); 2700 error = path_with_deleted(path, &root, &res, &buflen); 2701 if (error > 0) 2702 error = prepend_unreachable(&res, &buflen); 2703 write_sequnlock(&rename_lock); 2704 path_put(&root); 2705 if (error) 2706 res = ERR_PTR(error); 2707 2708 return res; 2709 } 2710 2711 /* 2712 * Helper function for dentry_operations.d_dname() members 2713 */ 2714 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 2715 const char *fmt, ...) 2716 { 2717 va_list args; 2718 char temp[64]; 2719 int sz; 2720 2721 va_start(args, fmt); 2722 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 2723 va_end(args); 2724 2725 if (sz > sizeof(temp) || sz > buflen) 2726 return ERR_PTR(-ENAMETOOLONG); 2727 2728 buffer += buflen - sz; 2729 return memcpy(buffer, temp, sz); 2730 } 2731 2732 /* 2733 * Write full pathname from the root of the filesystem into the buffer. 
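 *
 * Unlike prepend_path(), this walks d_parent only and never crosses
 * mount points, so the result is relative to the dentry's own
 * filesystem root rather than to any namespace root. For example, for
 * a file on a filesystem mounted at /usr, the dentry for /usr/bin/ls
 * would come back as "/bin/ls" here.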
2734 */ 2735 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen) 2736 { 2737 char *end = buf + buflen; 2738 char *retval; 2739 2740 prepend(&end, &buflen, "\0", 1); 2741 if (buflen < 1) 2742 goto Elong; 2743 /* Get '/' right */ 2744 retval = end-1; 2745 *retval = '/'; 2746 2747 while (!IS_ROOT(dentry)) { 2748 struct dentry *parent = dentry->d_parent; 2749 int error; 2750 2751 prefetch(parent); 2752 spin_lock(&dentry->d_lock); 2753 error = prepend_name(&end, &buflen, &dentry->d_name); 2754 spin_unlock(&dentry->d_lock); 2755 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0) 2756 goto Elong; 2757 2758 retval = end; 2759 dentry = parent; 2760 } 2761 return retval; 2762 Elong: 2763 return ERR_PTR(-ENAMETOOLONG); 2764 } 2765 2766 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) 2767 { 2768 char *retval; 2769 2770 write_seqlock(&rename_lock); 2771 retval = __dentry_path(dentry, buf, buflen); 2772 write_sequnlock(&rename_lock); 2773 2774 return retval; 2775 } 2776 EXPORT_SYMBOL(dentry_path_raw); 2777 2778 char *dentry_path(struct dentry *dentry, char *buf, int buflen) 2779 { 2780 char *p = NULL; 2781 char *retval; 2782 2783 write_seqlock(&rename_lock); 2784 if (d_unlinked(dentry)) { 2785 p = buf + buflen; 2786 if (prepend(&p, &buflen, "//deleted", 10) != 0) 2787 goto Elong; 2788 buflen++; 2789 } 2790 retval = __dentry_path(dentry, buf, buflen); 2791 write_sequnlock(&rename_lock); 2792 if (!IS_ERR(retval) && p) 2793 *p = '/'; /* restore '/' overridden with '\0' */ 2794 return retval; 2795 Elong: write_sequnlock(&rename_lock); /* rename_lock is still held on this path */ 2796 return ERR_PTR(-ENAMETOOLONG); 2797 } 2798 2799 /* 2800 * NOTE! The user-level library version returns a 2801 * character pointer. The kernel system call just 2802 * returns the length of the buffer filled (which 2803 * includes the ending '\0' character), or a negative 2804 * error value. So libc would do something like 2805 * 2806 * char *getcwd(char * buf, size_t size) 2807 * { 2808 * int retval; 2809 * 2810 * retval = sys_getcwd(buf, size); 2811 * if (retval >= 0) 2812 * return buf; 2813 * errno = -retval; 2814 * return NULL; 2815 * } 2816 */ 2817 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) 2818 { 2819 int error; 2820 struct path pwd, root; 2821 char *page = (char *) __get_free_page(GFP_USER); 2822 2823 if (!page) 2824 return -ENOMEM; 2825 2826 get_fs_root_and_pwd(current->fs, &root, &pwd); 2827 2828 error = -ENOENT; 2829 write_seqlock(&rename_lock); 2830 if (!d_unlinked(pwd.dentry)) { 2831 unsigned long len; 2832 char *cwd = page + PAGE_SIZE; 2833 int buflen = PAGE_SIZE; 2834 2835 prepend(&cwd, &buflen, "\0", 1); 2836 error = prepend_path(&pwd, &root, &cwd, &buflen); 2837 write_sequnlock(&rename_lock); 2838 2839 if (error < 0) 2840 goto out; 2841 2842 /* Unreachable from current root */ 2843 if (error > 0) { 2844 error = prepend_unreachable(&cwd, &buflen); 2845 if (error) 2846 goto out; 2847 } 2848 2849 error = -ERANGE; 2850 len = PAGE_SIZE + page - cwd; 2851 if (len <= size) { 2852 error = len; 2853 if (copy_to_user(buf, cwd, len)) 2854 error = -EFAULT; 2855 } 2856 } else { 2857 write_sequnlock(&rename_lock); 2858 } 2859 2860 out: 2861 path_put(&pwd); 2862 path_put(&root); 2863 free_page((unsigned long) page); 2864 return error; 2865 } 2866 2867 /* 2868 * Test whether new_dentry is a subdirectory of old_dentry.
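 * (That is, whether old_dentry can be reached from new_dentry by
 * repeatedly following d_parent.)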
2869 * 2870 * Trivially implemented using the dcache structure 2871 */ 2872 2873 /** 2874 * is_subdir - is new dentry a subdirectory of old_dentry 2875 * @new_dentry: new dentry 2876 * @old_dentry: old dentry 2877 * 2878 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth). 2879 * Returns 0 otherwise. 2880 * Caller must ensure that "new_dentry" is pinned before calling is_subdir() 2881 */ 2882 2883 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 2884 { 2885 int result; 2886 unsigned seq; 2887 2888 if (new_dentry == old_dentry) 2889 return 1; 2890 2891 do { 2892 /* for restarting inner loop in case of seq retry */ 2893 seq = read_seqbegin(&rename_lock); 2894 /* 2895 * Need rcu_read_lock() to protect against d_parent being 2896 * changed under us by d_move() 2897 */ 2898 rcu_read_lock(); 2899 if (d_ancestor(old_dentry, new_dentry)) 2900 result = 1; 2901 else 2902 result = 0; 2903 rcu_read_unlock(); 2904 } while (read_seqretry(&rename_lock, seq)); 2905 2906 return result; 2907 } 2908 2909 void d_genocide(struct dentry *root) 2910 { 2911 struct dentry *this_parent; 2912 struct list_head *next; 2913 unsigned seq; 2914 int locked = 0; 2915 2916 seq = read_seqbegin(&rename_lock); 2917 again: 2918 this_parent = root; 2919 spin_lock(&this_parent->d_lock); 2920 repeat: 2921 next = this_parent->d_subdirs.next; 2922 resume: 2923 while (next != &this_parent->d_subdirs) { 2924 struct list_head *tmp = next; 2925 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 2926 next = tmp->next; 2927 2928 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 2929 if (d_unhashed(dentry) || !dentry->d_inode) { 2930 spin_unlock(&dentry->d_lock); 2931 continue; 2932 } 2933 if (!list_empty(&dentry->d_subdirs)) { 2934 spin_unlock(&this_parent->d_lock); 2935 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 2936 this_parent = dentry; 2937 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 2938 goto repeat; 2939 } 2940 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 2941 dentry->d_flags |= DCACHE_GENOCIDE; 2942 dentry->d_count--; 2943 } 2944 spin_unlock(&dentry->d_lock); 2945 } 2946 if (this_parent != root) { 2947 struct dentry *child = this_parent; 2948 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 2949 this_parent->d_flags |= DCACHE_GENOCIDE; 2950 this_parent->d_count--; 2951 } 2952 this_parent = try_to_ascend(this_parent, locked, seq); 2953 if (!this_parent) 2954 goto rename_retry; 2955 next = child->d_u.d_child.next; 2956 goto resume; 2957 } 2958 spin_unlock(&this_parent->d_lock); 2959 if (!locked && read_seqretry(&rename_lock, seq)) 2960 goto rename_retry; 2961 if (locked) 2962 write_sequnlock(&rename_lock); 2963 return; 2964 2965 rename_retry: 2966 locked = 1; 2967 write_seqlock(&rename_lock); 2968 goto again; 2969 } 2970 2971 /** 2972 * find_inode_number - check for dentry with name 2973 * @dir: directory to check 2974 * @name: Name to find. 2975 * 2976 * Check whether a dentry already exists for the given name, 2977 * and return the inode number if it has an inode. Otherwise 2978 * 0 is returned. 2979 * 2980 * This routine is used to post-process directory listings for 2981 * filesystems using synthetic inode numbers, and is necessary 2982 * to keep getcwd() working.
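 *
 * A minimal sketch of that post-processing step (the variable names and
 * the fallback value are illustrative only; the hash is filled in by
 * d_hash_and_lookup() internally):
 *
 *	struct qstr q = { .name = de_name, .len = de_len };
 *	ino_t ino;
 *
 *	ino = find_inode_number(dir_dentry, &q);
 *	if (!ino)
 *		ino = synthetic_ino;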
2983 */ 2984 2985 ino_t find_inode_number(struct dentry *dir, struct qstr *name) 2986 { 2987 struct dentry * dentry; 2988 ino_t ino = 0; 2989 2990 dentry = d_hash_and_lookup(dir, name); 2991 if (dentry) { 2992 if (dentry->d_inode) 2993 ino = dentry->d_inode->i_ino; 2994 dput(dentry); 2995 } 2996 return ino; 2997 } 2998 EXPORT_SYMBOL(find_inode_number); 2999 3000 static __initdata unsigned long dhash_entries; 3001 static int __init set_dhash_entries(char *str) 3002 { 3003 if (!str) 3004 return 0; 3005 dhash_entries = simple_strtoul(str, &str, 0); 3006 return 1; 3007 } 3008 __setup("dhash_entries=", set_dhash_entries); 3009 3010 static void __init dcache_init_early(void) 3011 { 3012 unsigned int loop; 3013 3014 /* If hashes are distributed across NUMA nodes, defer 3015 * hash allocation until vmalloc space is available. 3016 */ 3017 if (hashdist) 3018 return; 3019 3020 dentry_hashtable = 3021 alloc_large_system_hash("Dentry cache", 3022 sizeof(struct hlist_bl_head), 3023 dhash_entries, 3024 13, 3025 HASH_EARLY, 3026 &d_hash_shift, 3027 &d_hash_mask, 3028 0); 3029 3030 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3031 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3032 } 3033 3034 static void __init dcache_init(void) 3035 { 3036 unsigned int loop; 3037 3038 /* 3039 * A constructor could be added for stable state like the lists, 3040 * but it is probably not worth it because of the cache nature 3041 * of the dcache. 3042 */ 3043 dentry_cache = KMEM_CACHE(dentry, 3044 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3045 3046 /* Hash may have been set up in dcache_init_early */ 3047 if (!hashdist) 3048 return; 3049 3050 dentry_hashtable = 3051 alloc_large_system_hash("Dentry cache", 3052 sizeof(struct hlist_bl_head), 3053 dhash_entries, 3054 13, 3055 0, 3056 &d_hash_shift, 3057 &d_hash_mask, 3058 0); 3059 3060 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3061 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3062 } 3063 3064 /* SLAB cache for __getname() consumers */ 3065 struct kmem_cache *names_cachep __read_mostly; 3066 EXPORT_SYMBOL(names_cachep); 3067 3068 EXPORT_SYMBOL(d_genocide); 3069 3070 void __init vfs_caches_init_early(void) 3071 { 3072 dcache_init_early(); 3073 inode_init_early(); 3074 } 3075 3076 void __init vfs_caches_init(unsigned long mempages) 3077 { 3078 unsigned long reserve; 3079 3080 /* Base hash sizes on available memory, with a reserve equal to 3081 150% of current kernel size */ 3082 3083 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3084 mempages -= reserve; 3085 3086 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3087 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3088 3089 dcache_init(); 3090 inode_init(); 3091 files_init(mempages); 3092 mnt_init(); 3093 bdev_cache_init(); 3094 chrdev_init(); 3095 } 3096
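/*
 * Tuning note: the dentry hash table normally scales with available
 * memory via alloc_large_system_hash(), but set_dhash_entries() above
 * lets its size be pinned on the kernel command line, e.g. (an
 * illustrative value, not a recommendation):
 *
 *	dhash_entries=1048576
 */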