1 /* 2 * fs/dcache.c 3 * 4 * Complete reimplementation 5 * (C) 1997 Thomas Schoebel-Theuer, 6 * with heavy changes by Linus Torvalds 7 */ 8 9 /* 10 * Notes on the allocation strategy: 11 * 12 * The dcache is a master of the icache - whenever a dcache entry 13 * exists, the inode will always exist. "iput()" is done either when 14 * the dcache entry is deleted or garbage collected. 15 */ 16 17 #include <linux/syscalls.h> 18 #include <linux/string.h> 19 #include <linux/mm.h> 20 #include <linux/fs.h> 21 #include <linux/fsnotify.h> 22 #include <linux/slab.h> 23 #include <linux/init.h> 24 #include <linux/hash.h> 25 #include <linux/cache.h> 26 #include <linux/export.h> 27 #include <linux/mount.h> 28 #include <linux/file.h> 29 #include <asm/uaccess.h> 30 #include <linux/security.h> 31 #include <linux/seqlock.h> 32 #include <linux/swap.h> 33 #include <linux/bootmem.h> 34 #include <linux/fs_struct.h> 35 #include <linux/hardirq.h> 36 #include <linux/bit_spinlock.h> 37 #include <linux/rculist_bl.h> 38 #include <linux/prefetch.h> 39 #include <linux/ratelimit.h> 40 #include "internal.h" 41 #include "mount.h" 42 43 /* 44 * Usage: 45 * dcache->d_inode->i_lock protects: 46 * - i_dentry, d_alias, d_inode of aliases 47 * dcache_hash_bucket lock protects: 48 * - the dcache hash table 49 * s_anon bl list spinlock protects: 50 * - the s_anon list (see __d_drop) 51 * dcache_lru_lock protects: 52 * - the dcache lru lists and counters 53 * d_lock protects: 54 * - d_flags 55 * - d_name 56 * - d_lru 57 * - d_count 58 * - d_unhashed() 59 * - d_parent and d_subdirs 60 * - childrens' d_child and d_parent 61 * - d_alias, d_inode 62 * 63 * Ordering: 64 * dentry->d_inode->i_lock 65 * dentry->d_lock 66 * dcache_lru_lock 67 * dcache_hash_bucket lock 68 * s_anon lock 69 * 70 * If there is an ancestor relationship: 71 * dentry->d_parent->...->d_parent->d_lock 72 * ... 73 * dentry->d_parent->d_lock 74 * dentry->d_lock 75 * 76 * If no ancestor relationship: 77 * if (dentry1 < dentry2) 78 * dentry1->d_lock 79 * dentry2->d_lock 80 */ 81 int sysctl_vfs_cache_pressure __read_mostly = 100; 82 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 83 84 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); 85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 86 87 EXPORT_SYMBOL(rename_lock); 88 89 static struct kmem_cache *dentry_cache __read_mostly; 90 91 /* 92 * This is the single most critical data structure when it comes 93 * to the dcache: the hashtable for lookups. Somebody should try 94 * to make this good - I've just made it work. 95 * 96 * This hash-function tries to avoid losing too many bits of hash 97 * information, yet avoid using a prime hash-size or similar. 98 */ 99 #define D_HASHBITS d_hash_shift 100 #define D_HASHMASK d_hash_mask 101 102 static unsigned int d_hash_mask __read_mostly; 103 static unsigned int d_hash_shift __read_mostly; 104 105 static struct hlist_bl_head *dentry_hashtable __read_mostly; 106 107 static inline struct hlist_bl_head *d_hash(const struct dentry *parent, 108 unsigned int hash) 109 { 110 hash += (unsigned long) parent / L1_CACHE_BYTES; 111 hash = hash + (hash >> D_HASHBITS); 112 return dentry_hashtable + (hash & D_HASHMASK); 113 } 114 115 /* Statistics gathering. 
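 *
 * dentry_stat below is what the VFS reports to userspace through the fs
 * sysctl table (conventionally exposed as /proc/sys/fs/dentry-state).
 * The total dentry count is kept in per-CPU counters (nr_dentry) and only
 * summed up by get_nr_dentry() when that sysctl is actually read, while
 * nr_unused is a plain global updated under dcache_lru_lock.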
*/ 116 struct dentry_stat_t dentry_stat = { 117 .age_limit = 45, 118 }; 119 120 static DEFINE_PER_CPU(unsigned int, nr_dentry); 121 122 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 123 static int get_nr_dentry(void) 124 { 125 int i; 126 int sum = 0; 127 for_each_possible_cpu(i) 128 sum += per_cpu(nr_dentry, i); 129 return sum < 0 ? 0 : sum; 130 } 131 132 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, 133 size_t *lenp, loff_t *ppos) 134 { 135 dentry_stat.nr_dentry = get_nr_dentry(); 136 return proc_dointvec(table, write, buffer, lenp, ppos); 137 } 138 #endif 139 140 /* 141 * Compare 2 name strings, return 0 if they match, otherwise non-zero. 142 * The strings are both count bytes long, and count is non-zero. 143 */ 144 #ifdef CONFIG_DCACHE_WORD_ACCESS 145 146 #include <asm/word-at-a-time.h> 147 /* 148 * NOTE! 'cs' and 'scount' come from a dentry, so it has a 149 * aligned allocation for this particular component. We don't 150 * strictly need the load_unaligned_zeropad() safety, but it 151 * doesn't hurt either. 152 * 153 * In contrast, 'ct' and 'tcount' can be from a pathname, and do 154 * need the careful unaligned handling. 155 */ 156 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 157 { 158 unsigned long a,b,mask; 159 160 for (;;) { 161 a = *(unsigned long *)cs; 162 b = load_unaligned_zeropad(ct); 163 if (tcount < sizeof(unsigned long)) 164 break; 165 if (unlikely(a != b)) 166 return 1; 167 cs += sizeof(unsigned long); 168 ct += sizeof(unsigned long); 169 tcount -= sizeof(unsigned long); 170 if (!tcount) 171 return 0; 172 } 173 mask = ~(~0ul << tcount*8); 174 return unlikely(!!((a ^ b) & mask)); 175 } 176 177 #else 178 179 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 180 { 181 do { 182 if (*cs != *ct) 183 return 1; 184 cs++; 185 ct++; 186 tcount--; 187 } while (tcount); 188 return 0; 189 } 190 191 #endif 192 193 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) 194 { 195 const unsigned char *cs; 196 /* 197 * Be careful about RCU walk racing with rename: 198 * use ACCESS_ONCE to fetch the name pointer. 199 * 200 * NOTE! Even if a rename will mean that the length 201 * was not loaded atomically, we don't care. The 202 * RCU walk will check the sequence count eventually, 203 * and catch it. And we won't overrun the buffer, 204 * because we're reading the name pointer atomically, 205 * and a dentry name is guaranteed to be properly 206 * terminated with a NUL byte. 207 * 208 * End result: even if 'len' is wrong, we'll exit 209 * early because the data cannot match (there can 210 * be no NUL in the ct/tcount data) 211 */ 212 cs = ACCESS_ONCE(dentry->d_name.name); 213 smp_read_barrier_depends(); 214 return dentry_string_cmp(cs, ct, tcount); 215 } 216 217 static void __d_free(struct rcu_head *head) 218 { 219 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 220 221 WARN_ON(!hlist_unhashed(&dentry->d_alias)); 222 if (dname_external(dentry)) 223 kfree(dentry->d_name.name); 224 kmem_cache_free(dentry_cache, dentry); 225 } 226 227 /* 228 * no locks, please. 
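 *
 * A dentry that never had DCACHE_RCUACCESS set was never visible to
 * lock-free rcu-walk lookups, so d_free() below may free it immediately;
 * anything else is freed through call_rcu() so that concurrent rcu-walk
 * users get a grace period before the memory is reused.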
229 */ 230 static void d_free(struct dentry *dentry) 231 { 232 BUG_ON(dentry->d_count); 233 this_cpu_dec(nr_dentry); 234 if (dentry->d_op && dentry->d_op->d_release) 235 dentry->d_op->d_release(dentry); 236 237 /* if dentry was never visible to RCU, immediate free is OK */ 238 if (!(dentry->d_flags & DCACHE_RCUACCESS)) 239 __d_free(&dentry->d_u.d_rcu); 240 else 241 call_rcu(&dentry->d_u.d_rcu, __d_free); 242 } 243 244 /** 245 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups 246 * @dentry: the target dentry 247 * After this call, in-progress rcu-walk path lookup will fail. This 248 * should be called after unhashing, and after changing d_inode (if 249 * the dentry has not already been unhashed). 250 */ 251 static inline void dentry_rcuwalk_barrier(struct dentry *dentry) 252 { 253 assert_spin_locked(&dentry->d_lock); 254 /* Go through a barrier */ 255 write_seqcount_barrier(&dentry->d_seq); 256 } 257 258 /* 259 * Release the dentry's inode, using the filesystem 260 * d_iput() operation if defined. Dentry has no refcount 261 * and is unhashed. 262 */ 263 static void dentry_iput(struct dentry * dentry) 264 __releases(dentry->d_lock) 265 __releases(dentry->d_inode->i_lock) 266 { 267 struct inode *inode = dentry->d_inode; 268 if (inode) { 269 dentry->d_inode = NULL; 270 hlist_del_init(&dentry->d_alias); 271 spin_unlock(&dentry->d_lock); 272 spin_unlock(&inode->i_lock); 273 if (!inode->i_nlink) 274 fsnotify_inoderemove(inode); 275 if (dentry->d_op && dentry->d_op->d_iput) 276 dentry->d_op->d_iput(dentry, inode); 277 else 278 iput(inode); 279 } else { 280 spin_unlock(&dentry->d_lock); 281 } 282 } 283 284 /* 285 * Release the dentry's inode, using the filesystem 286 * d_iput() operation if defined. dentry remains in-use. 287 */ 288 static void dentry_unlink_inode(struct dentry * dentry) 289 __releases(dentry->d_lock) 290 __releases(dentry->d_inode->i_lock) 291 { 292 struct inode *inode = dentry->d_inode; 293 dentry->d_inode = NULL; 294 hlist_del_init(&dentry->d_alias); 295 dentry_rcuwalk_barrier(dentry); 296 spin_unlock(&dentry->d_lock); 297 spin_unlock(&inode->i_lock); 298 if (!inode->i_nlink) 299 fsnotify_inoderemove(inode); 300 if (dentry->d_op && dentry->d_op->d_iput) 301 dentry->d_op->d_iput(dentry, inode); 302 else 303 iput(inode); 304 } 305 306 /* 307 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held. 308 */ 309 static void dentry_lru_add(struct dentry *dentry) 310 { 311 if (list_empty(&dentry->d_lru)) { 312 spin_lock(&dcache_lru_lock); 313 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru); 314 dentry->d_sb->s_nr_dentry_unused++; 315 dentry_stat.nr_unused++; 316 spin_unlock(&dcache_lru_lock); 317 } 318 } 319 320 static void __dentry_lru_del(struct dentry *dentry) 321 { 322 list_del_init(&dentry->d_lru); 323 dentry->d_flags &= ~DCACHE_SHRINK_LIST; 324 dentry->d_sb->s_nr_dentry_unused--; 325 dentry_stat.nr_unused--; 326 } 327 328 /* 329 * Remove a dentry with references from the LRU. 330 */ 331 static void dentry_lru_del(struct dentry *dentry) 332 { 333 if (!list_empty(&dentry->d_lru)) { 334 spin_lock(&dcache_lru_lock); 335 __dentry_lru_del(dentry); 336 spin_unlock(&dcache_lru_lock); 337 } 338 } 339 340 /* 341 * Remove a dentry that is unreferenced and about to be pruned 342 * (unhashed and destroyed) from the LRU, and inform the file system. 343 * This wrapper should be called _prior_ to unhashing a victim dentry. 
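 *
 * Filesystems that want to be told register a ->d_prune method in their
 * dentry_operations; a minimal sketch (the example_* names are illustrative
 * and not part of this file) might look like:
 *
 *	static void example_d_prune(struct dentry *dentry)
 *	{
 *		// last chance to drop per-dentry state the fs keeps
 *	}
 *
 *	static const struct dentry_operations example_dentry_ops = {
 *		.d_prune	= example_d_prune,
 *	};
 *
 * d_set_d_op() turns a non-NULL .d_prune into the DCACHE_OP_PRUNE flag
 * tested in dentry_lru_prune() below.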
344 */ 345 static void dentry_lru_prune(struct dentry *dentry) 346 { 347 if (!list_empty(&dentry->d_lru)) { 348 if (dentry->d_flags & DCACHE_OP_PRUNE) 349 dentry->d_op->d_prune(dentry); 350 351 spin_lock(&dcache_lru_lock); 352 __dentry_lru_del(dentry); 353 spin_unlock(&dcache_lru_lock); 354 } 355 } 356 357 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list) 358 { 359 spin_lock(&dcache_lru_lock); 360 if (list_empty(&dentry->d_lru)) { 361 list_add_tail(&dentry->d_lru, list); 362 dentry->d_sb->s_nr_dentry_unused++; 363 dentry_stat.nr_unused++; 364 } else { 365 list_move_tail(&dentry->d_lru, list); 366 } 367 spin_unlock(&dcache_lru_lock); 368 } 369 370 /** 371 * d_kill - kill dentry and return parent 372 * @dentry: dentry to kill 373 * @parent: parent dentry 374 * 375 * The dentry must already be unhashed and removed from the LRU. 376 * 377 * If this is the root of the dentry tree, return NULL. 378 * 379 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by 380 * d_kill. 381 */ 382 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) 383 __releases(dentry->d_lock) 384 __releases(parent->d_lock) 385 __releases(dentry->d_inode->i_lock) 386 { 387 list_del(&dentry->d_u.d_child); 388 /* 389 * Inform try_to_ascend() that we are no longer attached to the 390 * dentry tree 391 */ 392 dentry->d_flags |= DCACHE_DISCONNECTED; 393 if (parent) 394 spin_unlock(&parent->d_lock); 395 dentry_iput(dentry); 396 /* 397 * dentry_iput drops the locks, at which point nobody (except 398 * transient RCU lookups) can reach this dentry. 399 */ 400 d_free(dentry); 401 return parent; 402 } 403 404 /* 405 * Unhash a dentry without inserting an RCU walk barrier or checking that 406 * dentry->d_lock is locked. The caller must take care of that, if 407 * appropriate. 408 */ 409 static void __d_shrink(struct dentry *dentry) 410 { 411 if (!d_unhashed(dentry)) { 412 struct hlist_bl_head *b; 413 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) 414 b = &dentry->d_sb->s_anon; 415 else 416 b = d_hash(dentry->d_parent, dentry->d_name.hash); 417 418 hlist_bl_lock(b); 419 __hlist_bl_del(&dentry->d_hash); 420 dentry->d_hash.pprev = NULL; 421 hlist_bl_unlock(b); 422 } 423 } 424 425 /** 426 * d_drop - drop a dentry 427 * @dentry: dentry to drop 428 * 429 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't 430 * be found through a VFS lookup any more. Note that this is different from 431 * deleting the dentry - d_delete will try to mark the dentry negative if 432 * possible, giving a successful _negative_ lookup, while d_drop will 433 * just make the cache lookup fail. 434 * 435 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some 436 * reason (NFS timeouts or autofs deletes). 437 * 438 * __d_drop requires dentry->d_lock. 439 */ 440 void __d_drop(struct dentry *dentry) 441 { 442 if (!d_unhashed(dentry)) { 443 __d_shrink(dentry); 444 dentry_rcuwalk_barrier(dentry); 445 } 446 } 447 EXPORT_SYMBOL(__d_drop); 448 449 void d_drop(struct dentry *dentry) 450 { 451 spin_lock(&dentry->d_lock); 452 __d_drop(dentry); 453 spin_unlock(&dentry->d_lock); 454 } 455 EXPORT_SYMBOL(d_drop); 456 457 /* 458 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag 459 * @dentry: dentry to drop 460 * 461 * This is called when we do a lookup on a placeholder dentry that needed to be 462 * looked up. 
The dentry should have been hashed in order for it to be found by 463 * the lookup code, but now needs to be unhashed while we do the actual lookup 464 * and clear the DCACHE_NEED_LOOKUP flag. 465 */ 466 void d_clear_need_lookup(struct dentry *dentry) 467 { 468 spin_lock(&dentry->d_lock); 469 __d_drop(dentry); 470 dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 471 spin_unlock(&dentry->d_lock); 472 } 473 EXPORT_SYMBOL(d_clear_need_lookup); 474 475 /* 476 * Finish off a dentry we've decided to kill. 477 * dentry->d_lock must be held, returns with it unlocked. 478 * If ref is non-zero, then decrement the refcount too. 479 * Returns dentry requiring refcount drop, or NULL if we're done. 480 */ 481 static inline struct dentry *dentry_kill(struct dentry *dentry, int ref) 482 __releases(dentry->d_lock) 483 { 484 struct inode *inode; 485 struct dentry *parent; 486 487 inode = dentry->d_inode; 488 if (inode && !spin_trylock(&inode->i_lock)) { 489 relock: 490 spin_unlock(&dentry->d_lock); 491 cpu_relax(); 492 return dentry; /* try again with same dentry */ 493 } 494 if (IS_ROOT(dentry)) 495 parent = NULL; 496 else 497 parent = dentry->d_parent; 498 if (parent && !spin_trylock(&parent->d_lock)) { 499 if (inode) 500 spin_unlock(&inode->i_lock); 501 goto relock; 502 } 503 504 if (ref) 505 dentry->d_count--; 506 /* 507 * if dentry was on the d_lru list delete it from there. 508 * inform the fs via d_prune that this dentry is about to be 509 * unhashed and destroyed. 510 */ 511 dentry_lru_prune(dentry); 512 /* if it was on the hash then remove it */ 513 __d_drop(dentry); 514 return d_kill(dentry, parent); 515 } 516 517 /* 518 * This is dput 519 * 520 * This is complicated by the fact that we do not want to put 521 * dentries that are no longer on any hash chain on the unused 522 * list: we'd much rather just get rid of them immediately. 523 * 524 * However, that implies that we have to traverse the dentry 525 * tree upwards to the parents which might _also_ now be 526 * scheduled for deletion (it may have been only waiting for 527 * its last child to go away). 528 * 529 * This tail recursion is done by hand as we don't want to depend 530 * on the compiler to always get this right (gcc generally doesn't). 531 * Real recursion would eat up our stack space. 532 */ 533 534 /* 535 * dput - release a dentry 536 * @dentry: dentry to release 537 * 538 * Release a dentry. This will drop the usage count and if appropriate 539 * call the dentry unlink method as well as removing it from the queues and 540 * releasing its resources. If the parent dentries were scheduled for release 541 * they too may now get deleted. 542 */ 543 void dput(struct dentry *dentry) 544 { 545 if (!dentry) 546 return; 547 548 repeat: 549 if (dentry->d_count == 1) 550 might_sleep(); 551 spin_lock(&dentry->d_lock); 552 BUG_ON(!dentry->d_count); 553 if (dentry->d_count > 1) { 554 dentry->d_count--; 555 spin_unlock(&dentry->d_lock); 556 return; 557 } 558 559 if (dentry->d_flags & DCACHE_OP_DELETE) { 560 if (dentry->d_op->d_delete(dentry)) 561 goto kill_it; 562 } 563 564 /* Unreachable? Get rid of it */ 565 if (d_unhashed(dentry)) 566 goto kill_it; 567 568 /* 569 * If this dentry needs lookup, don't set the referenced flag so that it 570 * is more likely to be cleaned up by the dcache shrinker in case of 571 * memory pressure. 
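	 *
	 * DCACHE_REFERENCED gives a dentry a second chance in
	 * prune_dcache_sb(): a referenced dentry has the flag cleared and is
	 * put back on the LRU rather than being freed on that pass.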
572 */ 573 if (!d_need_lookup(dentry)) 574 dentry->d_flags |= DCACHE_REFERENCED; 575 dentry_lru_add(dentry); 576 577 dentry->d_count--; 578 spin_unlock(&dentry->d_lock); 579 return; 580 581 kill_it: 582 dentry = dentry_kill(dentry, 1); 583 if (dentry) 584 goto repeat; 585 } 586 EXPORT_SYMBOL(dput); 587 588 /** 589 * d_invalidate - invalidate a dentry 590 * @dentry: dentry to invalidate 591 * 592 * Try to invalidate the dentry if it turns out to be 593 * possible. If there are other dentries that can be 594 * reached through this one we can't delete it and we 595 * return -EBUSY. On success we return 0. 596 * 597 * no dcache lock. 598 */ 599 600 int d_invalidate(struct dentry * dentry) 601 { 602 /* 603 * If it's already been dropped, return OK. 604 */ 605 spin_lock(&dentry->d_lock); 606 if (d_unhashed(dentry)) { 607 spin_unlock(&dentry->d_lock); 608 return 0; 609 } 610 /* 611 * Check whether to do a partial shrink_dcache 612 * to get rid of unused child entries. 613 */ 614 if (!list_empty(&dentry->d_subdirs)) { 615 spin_unlock(&dentry->d_lock); 616 shrink_dcache_parent(dentry); 617 spin_lock(&dentry->d_lock); 618 } 619 620 /* 621 * Somebody else still using it? 622 * 623 * If it's a directory, we can't drop it 624 * for fear of somebody re-populating it 625 * with children (even though dropping it 626 * would make it unreachable from the root, 627 * we might still populate it if it was a 628 * working directory or similar). 629 * We also need to leave mountpoints alone, 630 * directory or not. 631 */ 632 if (dentry->d_count > 1 && dentry->d_inode) { 633 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { 634 spin_unlock(&dentry->d_lock); 635 return -EBUSY; 636 } 637 } 638 639 __d_drop(dentry); 640 spin_unlock(&dentry->d_lock); 641 return 0; 642 } 643 EXPORT_SYMBOL(d_invalidate); 644 645 /* This must be called with d_lock held */ 646 static inline void __dget_dlock(struct dentry *dentry) 647 { 648 dentry->d_count++; 649 } 650 651 static inline void __dget(struct dentry *dentry) 652 { 653 spin_lock(&dentry->d_lock); 654 __dget_dlock(dentry); 655 spin_unlock(&dentry->d_lock); 656 } 657 658 struct dentry *dget_parent(struct dentry *dentry) 659 { 660 struct dentry *ret; 661 662 repeat: 663 /* 664 * Don't need rcu_dereference because we re-check it was correct under 665 * the lock. 666 */ 667 rcu_read_lock(); 668 ret = dentry->d_parent; 669 spin_lock(&ret->d_lock); 670 if (unlikely(ret != dentry->d_parent)) { 671 spin_unlock(&ret->d_lock); 672 rcu_read_unlock(); 673 goto repeat; 674 } 675 rcu_read_unlock(); 676 BUG_ON(!ret->d_count); 677 ret->d_count++; 678 spin_unlock(&ret->d_lock); 679 return ret; 680 } 681 EXPORT_SYMBOL(dget_parent); 682 683 /** 684 * d_find_alias - grab a hashed alias of inode 685 * @inode: inode in question 686 * @want_discon: flag, used by d_splice_alias, to request 687 * that only a DISCONNECTED alias be returned. 688 * 689 * If inode has a hashed alias, or is a directory and has any alias, 690 * acquire the reference to alias and return it. Otherwise return NULL. 691 * Notice that if inode is a directory there can be only one alias and 692 * it can be unhashed only if it has no children, or if it is the root 693 * of a filesystem. 694 * 695 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer 696 * any other hashed alias over that one unless @want_discon is set, 697 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 
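 *
 * A sketched example of how a caller might use the exported d_find_alias()
 * wrapper below, e.g. to invalidate whatever dentry currently names an
 * inode (the invalidation action shown is illustrative only):
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		d_drop(alias);
 *		dput(alias);	// d_find_alias() returned a counted reference
 *	}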
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;
	struct hlist_node *p;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
	struct hlist_node *p;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired; no problem, just try again.
 */
static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * if it returns the same dentry, trylocks failed. In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return;
	if (parent == dentry)
		return;

	/* Prune ancestors.
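	 *
	 * Each dentry killed so far held a reference on its parent, so walk
	 * upwards dropping that reference: if a parent is still in use we
	 * just put our count and stop, otherwise dentry_kill() frees it and
	 * hands us the next ancestor to consider.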
	 */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @count: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @count entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
void prune_dcache_sb(struct super_block *sb, int count)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			dentry->d_flags |= DCACHE_SHRINK_LIST;
			spin_unlock(&dentry->d_lock);
			if (!--count)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
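 *
 * A sketched call site (illustrative, not taken from this file): a
 * filesystem that wants to drop every unused dentry it owns, for instance
 * while invalidating its cached state wholesale, can simply do
 *
 *	shrink_dcache_sb(sb);
 *
 * Dentries that still have users are left alone; only the per-superblock
 * LRU of unused dentries is torn down.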
909 */ 910 void shrink_dcache_sb(struct super_block *sb) 911 { 912 LIST_HEAD(tmp); 913 914 spin_lock(&dcache_lru_lock); 915 while (!list_empty(&sb->s_dentry_lru)) { 916 list_splice_init(&sb->s_dentry_lru, &tmp); 917 spin_unlock(&dcache_lru_lock); 918 shrink_dentry_list(&tmp); 919 spin_lock(&dcache_lru_lock); 920 } 921 spin_unlock(&dcache_lru_lock); 922 } 923 EXPORT_SYMBOL(shrink_dcache_sb); 924 925 /* 926 * destroy a single subtree of dentries for unmount 927 * - see the comments on shrink_dcache_for_umount() for a description of the 928 * locking 929 */ 930 static void shrink_dcache_for_umount_subtree(struct dentry *dentry) 931 { 932 struct dentry *parent; 933 934 BUG_ON(!IS_ROOT(dentry)); 935 936 for (;;) { 937 /* descend to the first leaf in the current subtree */ 938 while (!list_empty(&dentry->d_subdirs)) 939 dentry = list_entry(dentry->d_subdirs.next, 940 struct dentry, d_u.d_child); 941 942 /* consume the dentries from this leaf up through its parents 943 * until we find one with children or run out altogether */ 944 do { 945 struct inode *inode; 946 947 /* 948 * remove the dentry from the lru, and inform 949 * the fs that this dentry is about to be 950 * unhashed and destroyed. 951 */ 952 dentry_lru_prune(dentry); 953 __d_shrink(dentry); 954 955 if (dentry->d_count != 0) { 956 printk(KERN_ERR 957 "BUG: Dentry %p{i=%lx,n=%s}" 958 " still in use (%d)" 959 " [unmount of %s %s]\n", 960 dentry, 961 dentry->d_inode ? 962 dentry->d_inode->i_ino : 0UL, 963 dentry->d_name.name, 964 dentry->d_count, 965 dentry->d_sb->s_type->name, 966 dentry->d_sb->s_id); 967 BUG(); 968 } 969 970 if (IS_ROOT(dentry)) { 971 parent = NULL; 972 list_del(&dentry->d_u.d_child); 973 } else { 974 parent = dentry->d_parent; 975 parent->d_count--; 976 list_del(&dentry->d_u.d_child); 977 } 978 979 inode = dentry->d_inode; 980 if (inode) { 981 dentry->d_inode = NULL; 982 hlist_del_init(&dentry->d_alias); 983 if (dentry->d_op && dentry->d_op->d_iput) 984 dentry->d_op->d_iput(dentry, inode); 985 else 986 iput(inode); 987 } 988 989 d_free(dentry); 990 991 /* finished when we fall off the top of the tree, 992 * otherwise we ascend to the parent and move to the 993 * next sibling if there is one */ 994 if (!parent) 995 return; 996 dentry = parent; 997 } while (list_empty(&dentry->d_subdirs)); 998 999 dentry = list_entry(dentry->d_subdirs.next, 1000 struct dentry, d_u.d_child); 1001 } 1002 } 1003 1004 /* 1005 * destroy the dentries attached to a superblock on unmounting 1006 * - we don't need to use dentry->d_lock because: 1007 * - the superblock is detached from all mountings and open files, so the 1008 * dentry trees will not be rearranged by the VFS 1009 * - s_umount is write-locked, so the memory pressure shrinker will ignore 1010 * any dentries belonging to this superblock that it comes across 1011 * - the filesystem itself is no longer permitted to rearrange the dentries 1012 * in this superblock 1013 */ 1014 void shrink_dcache_for_umount(struct super_block *sb) 1015 { 1016 struct dentry *dentry; 1017 1018 if (down_read_trylock(&sb->s_umount)) 1019 BUG(); 1020 1021 dentry = sb->s_root; 1022 sb->s_root = NULL; 1023 dentry->d_count--; 1024 shrink_dcache_for_umount_subtree(dentry); 1025 1026 while (!hlist_bl_empty(&sb->s_anon)) { 1027 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash); 1028 shrink_dcache_for_umount_subtree(dentry); 1029 } 1030 } 1031 1032 /* 1033 * This tries to ascend one level of parenthood, but 1034 * we can race with renaming, so we need to re-check 1035 * the parenthood after 
dropping the lock and check 1036 * that the sequence number still matches. 1037 */ 1038 static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq) 1039 { 1040 struct dentry *new = old->d_parent; 1041 1042 rcu_read_lock(); 1043 spin_unlock(&old->d_lock); 1044 spin_lock(&new->d_lock); 1045 1046 /* 1047 * might go back up the wrong parent if we have had a rename 1048 * or deletion 1049 */ 1050 if (new != old->d_parent || 1051 (old->d_flags & DCACHE_DISCONNECTED) || 1052 (!locked && read_seqretry(&rename_lock, seq))) { 1053 spin_unlock(&new->d_lock); 1054 new = NULL; 1055 } 1056 rcu_read_unlock(); 1057 return new; 1058 } 1059 1060 1061 /* 1062 * Search for at least 1 mount point in the dentry's subdirs. 1063 * We descend to the next level whenever the d_subdirs 1064 * list is non-empty and continue searching. 1065 */ 1066 1067 /** 1068 * have_submounts - check for mounts over a dentry 1069 * @parent: dentry to check. 1070 * 1071 * Return true if the parent or its subdirectories contain 1072 * a mount point 1073 */ 1074 int have_submounts(struct dentry *parent) 1075 { 1076 struct dentry *this_parent; 1077 struct list_head *next; 1078 unsigned seq; 1079 int locked = 0; 1080 1081 seq = read_seqbegin(&rename_lock); 1082 again: 1083 this_parent = parent; 1084 1085 if (d_mountpoint(parent)) 1086 goto positive; 1087 spin_lock(&this_parent->d_lock); 1088 repeat: 1089 next = this_parent->d_subdirs.next; 1090 resume: 1091 while (next != &this_parent->d_subdirs) { 1092 struct list_head *tmp = next; 1093 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1094 next = tmp->next; 1095 1096 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1097 /* Have we found a mount point ? */ 1098 if (d_mountpoint(dentry)) { 1099 spin_unlock(&dentry->d_lock); 1100 spin_unlock(&this_parent->d_lock); 1101 goto positive; 1102 } 1103 if (!list_empty(&dentry->d_subdirs)) { 1104 spin_unlock(&this_parent->d_lock); 1105 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1106 this_parent = dentry; 1107 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1108 goto repeat; 1109 } 1110 spin_unlock(&dentry->d_lock); 1111 } 1112 /* 1113 * All done at this level ... ascend and resume the search. 1114 */ 1115 if (this_parent != parent) { 1116 struct dentry *child = this_parent; 1117 this_parent = try_to_ascend(this_parent, locked, seq); 1118 if (!this_parent) 1119 goto rename_retry; 1120 next = child->d_u.d_child.next; 1121 goto resume; 1122 } 1123 spin_unlock(&this_parent->d_lock); 1124 if (!locked && read_seqretry(&rename_lock, seq)) 1125 goto rename_retry; 1126 if (locked) 1127 write_sequnlock(&rename_lock); 1128 return 0; /* No mount points found in tree */ 1129 positive: 1130 if (!locked && read_seqretry(&rename_lock, seq)) 1131 goto rename_retry; 1132 if (locked) 1133 write_sequnlock(&rename_lock); 1134 return 1; 1135 1136 rename_retry: 1137 locked = 1; 1138 write_seqlock(&rename_lock); 1139 goto again; 1140 } 1141 EXPORT_SYMBOL(have_submounts); 1142 1143 /* 1144 * Search the dentry child list for the specified parent, 1145 * and move any unused dentries to the end of the unused 1146 * list for prune_dcache(). We descend to the next level 1147 * whenever the d_subdirs list is non-empty and continue 1148 * searching. 1149 * 1150 * It returns zero iff there are no unused children, 1151 * otherwise it returns the number of children moved to 1152 * the end of the unused list. 
This may not be the total 1153 * number of unused children, because select_parent can 1154 * drop the lock and return early due to latency 1155 * constraints. 1156 */ 1157 static int select_parent(struct dentry *parent, struct list_head *dispose) 1158 { 1159 struct dentry *this_parent; 1160 struct list_head *next; 1161 unsigned seq; 1162 int found = 0; 1163 int locked = 0; 1164 1165 seq = read_seqbegin(&rename_lock); 1166 again: 1167 this_parent = parent; 1168 spin_lock(&this_parent->d_lock); 1169 repeat: 1170 next = this_parent->d_subdirs.next; 1171 resume: 1172 while (next != &this_parent->d_subdirs) { 1173 struct list_head *tmp = next; 1174 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1175 next = tmp->next; 1176 1177 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1178 1179 /* 1180 * move only zero ref count dentries to the dispose list. 1181 * 1182 * Those which are presently on the shrink list, being processed 1183 * by shrink_dentry_list(), shouldn't be moved. Otherwise the 1184 * loop in shrink_dcache_parent() might not make any progress 1185 * and loop forever. 1186 */ 1187 if (dentry->d_count) { 1188 dentry_lru_del(dentry); 1189 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1190 dentry_lru_move_list(dentry, dispose); 1191 dentry->d_flags |= DCACHE_SHRINK_LIST; 1192 found++; 1193 } 1194 /* 1195 * We can return to the caller if we have found some (this 1196 * ensures forward progress). We'll be coming back to find 1197 * the rest. 1198 */ 1199 if (found && need_resched()) { 1200 spin_unlock(&dentry->d_lock); 1201 goto out; 1202 } 1203 1204 /* 1205 * Descend a level if the d_subdirs list is non-empty. 1206 */ 1207 if (!list_empty(&dentry->d_subdirs)) { 1208 spin_unlock(&this_parent->d_lock); 1209 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1210 this_parent = dentry; 1211 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1212 goto repeat; 1213 } 1214 1215 spin_unlock(&dentry->d_lock); 1216 } 1217 /* 1218 * All done at this level ... ascend and resume the search. 1219 */ 1220 if (this_parent != parent) { 1221 struct dentry *child = this_parent; 1222 this_parent = try_to_ascend(this_parent, locked, seq); 1223 if (!this_parent) 1224 goto rename_retry; 1225 next = child->d_u.d_child.next; 1226 goto resume; 1227 } 1228 out: 1229 spin_unlock(&this_parent->d_lock); 1230 if (!locked && read_seqretry(&rename_lock, seq)) 1231 goto rename_retry; 1232 if (locked) 1233 write_sequnlock(&rename_lock); 1234 return found; 1235 1236 rename_retry: 1237 if (found) 1238 return found; 1239 locked = 1; 1240 write_seqlock(&rename_lock); 1241 goto again; 1242 } 1243 1244 /** 1245 * shrink_dcache_parent - prune dcache 1246 * @parent: parent of entries to prune 1247 * 1248 * Prune the dcache to remove unused children of the parent dentry. 1249 */ 1250 void shrink_dcache_parent(struct dentry * parent) 1251 { 1252 LIST_HEAD(dispose); 1253 int found; 1254 1255 while ((found = select_parent(parent, &dispose)) != 0) 1256 shrink_dentry_list(&dispose); 1257 } 1258 EXPORT_SYMBOL(shrink_dcache_parent); 1259 1260 /** 1261 * __d_alloc - allocate a dcache entry 1262 * @sb: filesystem it will belong to 1263 * @name: qstr of the name 1264 * 1265 * Allocates a dentry. It returns %NULL if there is insufficient memory 1266 * available. On a success the dentry is returned. The name passed in is 1267 * copied and the copy passed in may be reused after this call. 
1268 */ 1269 1270 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) 1271 { 1272 struct dentry *dentry; 1273 char *dname; 1274 1275 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 1276 if (!dentry) 1277 return NULL; 1278 1279 /* 1280 * We guarantee that the inline name is always NUL-terminated. 1281 * This way the memcpy() done by the name switching in rename 1282 * will still always have a NUL at the end, even if we might 1283 * be overwriting an internal NUL character 1284 */ 1285 dentry->d_iname[DNAME_INLINE_LEN-1] = 0; 1286 if (name->len > DNAME_INLINE_LEN-1) { 1287 dname = kmalloc(name->len + 1, GFP_KERNEL); 1288 if (!dname) { 1289 kmem_cache_free(dentry_cache, dentry); 1290 return NULL; 1291 } 1292 } else { 1293 dname = dentry->d_iname; 1294 } 1295 1296 dentry->d_name.len = name->len; 1297 dentry->d_name.hash = name->hash; 1298 memcpy(dname, name->name, name->len); 1299 dname[name->len] = 0; 1300 1301 /* Make sure we always see the terminating NUL character */ 1302 smp_wmb(); 1303 dentry->d_name.name = dname; 1304 1305 dentry->d_count = 1; 1306 dentry->d_flags = 0; 1307 spin_lock_init(&dentry->d_lock); 1308 seqcount_init(&dentry->d_seq); 1309 dentry->d_inode = NULL; 1310 dentry->d_parent = dentry; 1311 dentry->d_sb = sb; 1312 dentry->d_op = NULL; 1313 dentry->d_fsdata = NULL; 1314 INIT_HLIST_BL_NODE(&dentry->d_hash); 1315 INIT_LIST_HEAD(&dentry->d_lru); 1316 INIT_LIST_HEAD(&dentry->d_subdirs); 1317 INIT_HLIST_NODE(&dentry->d_alias); 1318 INIT_LIST_HEAD(&dentry->d_u.d_child); 1319 d_set_d_op(dentry, dentry->d_sb->s_d_op); 1320 1321 this_cpu_inc(nr_dentry); 1322 1323 return dentry; 1324 } 1325 1326 /** 1327 * d_alloc - allocate a dcache entry 1328 * @parent: parent of entry to allocate 1329 * @name: qstr of the name 1330 * 1331 * Allocates a dentry. It returns %NULL if there is insufficient memory 1332 * available. On a success the dentry is returned. The name passed in is 1333 * copied and the copy passed in may be reused after this call. 
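 *
 * A sketched example of building a named child (the name and the inode are
 * assumed to come from the caller; error handling is illustrative only):
 *
 *	struct dentry *child;
 *
 *	child = d_alloc_name(parent, "example");	// wraps d_alloc()
 *	if (!child)
 *		return -ENOMEM;
 *	d_add(child, inode);	// hash it and attach the inode
 *
 * d_alloc() takes a reference on @parent and links the new child into the
 * parent's d_subdirs list.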
1334 */ 1335 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) 1336 { 1337 struct dentry *dentry = __d_alloc(parent->d_sb, name); 1338 if (!dentry) 1339 return NULL; 1340 1341 spin_lock(&parent->d_lock); 1342 /* 1343 * don't need child lock because it is not subject 1344 * to concurrency here 1345 */ 1346 __dget_dlock(parent); 1347 dentry->d_parent = parent; 1348 list_add(&dentry->d_u.d_child, &parent->d_subdirs); 1349 spin_unlock(&parent->d_lock); 1350 1351 return dentry; 1352 } 1353 EXPORT_SYMBOL(d_alloc); 1354 1355 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) 1356 { 1357 struct dentry *dentry = __d_alloc(sb, name); 1358 if (dentry) 1359 dentry->d_flags |= DCACHE_DISCONNECTED; 1360 return dentry; 1361 } 1362 EXPORT_SYMBOL(d_alloc_pseudo); 1363 1364 struct dentry *d_alloc_name(struct dentry *parent, const char *name) 1365 { 1366 struct qstr q; 1367 1368 q.name = name; 1369 q.len = strlen(name); 1370 q.hash = full_name_hash(q.name, q.len); 1371 return d_alloc(parent, &q); 1372 } 1373 EXPORT_SYMBOL(d_alloc_name); 1374 1375 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) 1376 { 1377 WARN_ON_ONCE(dentry->d_op); 1378 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | 1379 DCACHE_OP_COMPARE | 1380 DCACHE_OP_REVALIDATE | 1381 DCACHE_OP_DELETE )); 1382 dentry->d_op = op; 1383 if (!op) 1384 return; 1385 if (op->d_hash) 1386 dentry->d_flags |= DCACHE_OP_HASH; 1387 if (op->d_compare) 1388 dentry->d_flags |= DCACHE_OP_COMPARE; 1389 if (op->d_revalidate) 1390 dentry->d_flags |= DCACHE_OP_REVALIDATE; 1391 if (op->d_delete) 1392 dentry->d_flags |= DCACHE_OP_DELETE; 1393 if (op->d_prune) 1394 dentry->d_flags |= DCACHE_OP_PRUNE; 1395 1396 } 1397 EXPORT_SYMBOL(d_set_d_op); 1398 1399 static void __d_instantiate(struct dentry *dentry, struct inode *inode) 1400 { 1401 spin_lock(&dentry->d_lock); 1402 if (inode) { 1403 if (unlikely(IS_AUTOMOUNT(inode))) 1404 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; 1405 hlist_add_head(&dentry->d_alias, &inode->i_dentry); 1406 } 1407 dentry->d_inode = inode; 1408 dentry_rcuwalk_barrier(dentry); 1409 spin_unlock(&dentry->d_lock); 1410 fsnotify_d_instantiate(dentry, inode); 1411 } 1412 1413 /** 1414 * d_instantiate - fill in inode information for a dentry 1415 * @entry: dentry to complete 1416 * @inode: inode to attach to this dentry 1417 * 1418 * Fill in inode information in the entry. 1419 * 1420 * This turns negative dentries into productive full members 1421 * of society. 1422 * 1423 * NOTE! This assumes that the inode count has been incremented 1424 * (or otherwise set) by the caller to indicate that it is now 1425 * in use by the dcache. 1426 */ 1427 1428 void d_instantiate(struct dentry *entry, struct inode * inode) 1429 { 1430 BUG_ON(!hlist_unhashed(&entry->d_alias)); 1431 if (inode) 1432 spin_lock(&inode->i_lock); 1433 __d_instantiate(entry, inode); 1434 if (inode) 1435 spin_unlock(&inode->i_lock); 1436 security_d_instantiate(entry, inode); 1437 } 1438 EXPORT_SYMBOL(d_instantiate); 1439 1440 /** 1441 * d_instantiate_unique - instantiate a non-aliased dentry 1442 * @entry: dentry to instantiate 1443 * @inode: inode to attach to this dentry 1444 * 1445 * Fill in inode information in the entry. On success, it returns NULL. 1446 * If an unhashed alias of "entry" already exists, then we return the 1447 * aliased dentry instead and drop one reference to inode. 
1448 * 1449 * Note that in order to avoid conflicts with rename() etc, the caller 1450 * had better be holding the parent directory semaphore. 1451 * 1452 * This also assumes that the inode count has been incremented 1453 * (or otherwise set) by the caller to indicate that it is now 1454 * in use by the dcache. 1455 */ 1456 static struct dentry *__d_instantiate_unique(struct dentry *entry, 1457 struct inode *inode) 1458 { 1459 struct dentry *alias; 1460 int len = entry->d_name.len; 1461 const char *name = entry->d_name.name; 1462 unsigned int hash = entry->d_name.hash; 1463 struct hlist_node *p; 1464 1465 if (!inode) { 1466 __d_instantiate(entry, NULL); 1467 return NULL; 1468 } 1469 1470 hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { 1471 /* 1472 * Don't need alias->d_lock here, because aliases with 1473 * d_parent == entry->d_parent are not subject to name or 1474 * parent changes, because the parent inode i_mutex is held. 1475 */ 1476 if (alias->d_name.hash != hash) 1477 continue; 1478 if (alias->d_parent != entry->d_parent) 1479 continue; 1480 if (alias->d_name.len != len) 1481 continue; 1482 if (dentry_cmp(alias, name, len)) 1483 continue; 1484 __dget(alias); 1485 return alias; 1486 } 1487 1488 __d_instantiate(entry, inode); 1489 return NULL; 1490 } 1491 1492 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) 1493 { 1494 struct dentry *result; 1495 1496 BUG_ON(!hlist_unhashed(&entry->d_alias)); 1497 1498 if (inode) 1499 spin_lock(&inode->i_lock); 1500 result = __d_instantiate_unique(entry, inode); 1501 if (inode) 1502 spin_unlock(&inode->i_lock); 1503 1504 if (!result) { 1505 security_d_instantiate(entry, inode); 1506 return NULL; 1507 } 1508 1509 BUG_ON(!d_unhashed(result)); 1510 iput(inode); 1511 return result; 1512 } 1513 1514 EXPORT_SYMBOL(d_instantiate_unique); 1515 1516 struct dentry *d_make_root(struct inode *root_inode) 1517 { 1518 struct dentry *res = NULL; 1519 1520 if (root_inode) { 1521 static const struct qstr name = QSTR_INIT("/", 1); 1522 1523 res = __d_alloc(root_inode->i_sb, &name); 1524 if (res) 1525 d_instantiate(res, root_inode); 1526 else 1527 iput(root_inode); 1528 } 1529 return res; 1530 } 1531 EXPORT_SYMBOL(d_make_root); 1532 1533 static struct dentry * __d_find_any_alias(struct inode *inode) 1534 { 1535 struct dentry *alias; 1536 1537 if (hlist_empty(&inode->i_dentry)) 1538 return NULL; 1539 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 1540 __dget(alias); 1541 return alias; 1542 } 1543 1544 /** 1545 * d_find_any_alias - find any alias for a given inode 1546 * @inode: inode to find an alias for 1547 * 1548 * If any aliases exist for the given inode, take and return a 1549 * reference for one of them. If no aliases exist, return %NULL. 1550 */ 1551 struct dentry *d_find_any_alias(struct inode *inode) 1552 { 1553 struct dentry *de; 1554 1555 spin_lock(&inode->i_lock); 1556 de = __d_find_any_alias(inode); 1557 spin_unlock(&inode->i_lock); 1558 return de; 1559 } 1560 EXPORT_SYMBOL(d_find_any_alias); 1561 1562 /** 1563 * d_obtain_alias - find or allocate a dentry for a given inode 1564 * @inode: inode to allocate the dentry for 1565 * 1566 * Obtain a dentry for an inode resulting from NFS filehandle conversion or 1567 * similar open by handle operations. The returned dentry may be anonymous, 1568 * or may have a full name (if the inode was already in the cache). 1569 * 1570 * When called on a directory inode, we must ensure that the inode only ever 1571 * has one dentry. 
 * If a dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations, a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode: the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
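 *
 * In a filesystem's ->lookup this typically (sketch only; example_iget() is
 * an illustrative name) reduces to:
 *
 *	inode = example_iget(dir->i_sb, ino);	// may be NULL or ERR_PTR()
 *	return d_splice_alias(inode, dentry);
 *
 * A NULL inode simply leaves a negative dentry behind, and an ERR_PTR()
 * inode is passed straight through as the return value.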
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode: the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name: the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * We are going to instantiate this dentry, unhash it and clear the
	 * lookup flag so we can do that.
	 */
	if (unlikely(d_need_lookup(found)))
		d_clear_need_lookup(found);

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);

/*
 * Do the slow-case of the dentry name compare.
1761 * 1762 * Unlike the dentry_cmp() function, we need to atomically 1763 * load the name, length and inode information, so that the 1764 * filesystem can rely on them, and can use the 'name' and 1765 * 'len' information without worrying about walking off the 1766 * end of memory etc. 1767 * 1768 * Thus the read_seqcount_retry() and the "duplicate" info 1769 * in arguments (the low-level filesystem should not look 1770 * at the dentry inode or name contents directly, since 1771 * rename can change them while we're in RCU mode). 1772 */ 1773 enum slow_d_compare { 1774 D_COMP_OK, 1775 D_COMP_NOMATCH, 1776 D_COMP_SEQRETRY, 1777 }; 1778 1779 static noinline enum slow_d_compare slow_dentry_cmp( 1780 const struct dentry *parent, 1781 struct inode *inode, 1782 struct dentry *dentry, 1783 unsigned int seq, 1784 const struct qstr *name) 1785 { 1786 int tlen = dentry->d_name.len; 1787 const char *tname = dentry->d_name.name; 1788 struct inode *i = dentry->d_inode; 1789 1790 if (read_seqcount_retry(&dentry->d_seq, seq)) { 1791 cpu_relax(); 1792 return D_COMP_SEQRETRY; 1793 } 1794 if (parent->d_op->d_compare(parent, inode, 1795 dentry, i, 1796 tlen, tname, name)) 1797 return D_COMP_NOMATCH; 1798 return D_COMP_OK; 1799 } 1800 1801 /** 1802 * __d_lookup_rcu - search for a dentry (racy, store-free) 1803 * @parent: parent dentry 1804 * @name: qstr of name we wish to find 1805 * @seqp: returns d_seq value at the point where the dentry was found 1806 * @inode: returns dentry->d_inode when the inode was found valid. 1807 * Returns: dentry, or NULL 1808 * 1809 * __d_lookup_rcu is the dcache lookup function for rcu-walk name 1810 * resolution (store-free path walking) design described in 1811 * Documentation/filesystems/path-lookup.txt. 1812 * 1813 * This is not to be used outside core vfs. 1814 * 1815 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock 1816 * held, and rcu_read_lock held. The returned dentry must not be stored into 1817 * without taking d_lock and checking d_seq sequence count against @seq 1818 * returned here. 1819 * 1820 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount 1821 * function. 1822 * 1823 * Alternatively, __d_lookup_rcu may be called again to look up the child of 1824 * the returned dentry, so long as its parent's seqlock is checked after the 1825 * child is looked up. Thus, an interlocking stepping of sequence lock checks 1826 * is formed, giving integrity down the path walk. 1827 * 1828 * NOTE! The caller *has* to check the resulting dentry against the sequence 1829 * number we've returned before using any of the resulting dentry state! 1830 */ 1831 struct dentry *__d_lookup_rcu(const struct dentry *parent, 1832 const struct qstr *name, 1833 unsigned *seqp, struct inode *inode) 1834 { 1835 u64 hashlen = name->hash_len; 1836 const unsigned char *str = name->name; 1837 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen)); 1838 struct hlist_bl_node *node; 1839 struct dentry *dentry; 1840 1841 /* 1842 * Note: There is significant duplication with __d_lookup_rcu which is 1843 * required to prevent single threaded performance regressions 1844 * especially on architectures where smp_rmb (in seqcounts) are costly. 1845 * Keep the two functions in sync. 1846 */ 1847 1848 /* 1849 * The hash list is protected using RCU. 1850 * 1851 * Carefully use d_seq when comparing a candidate dentry, to avoid 1852 * races with d_move(). 
1853 * 1854 * It is possible that concurrent renames can mess up our list 1855 * walk here and result in missing our dentry, resulting in the 1856 * false-negative result. d_lookup() protects against concurrent 1857 * renames using rename_lock seqlock. 1858 * 1859 * See Documentation/filesystems/path-lookup.txt for more details. 1860 */ 1861 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 1862 unsigned seq; 1863 1864 seqretry: 1865 /* 1866 * The dentry sequence count protects us from concurrent 1867 * renames, and thus protects inode, parent and name fields. 1868 * 1869 * The caller must perform a seqcount check in order 1870 * to do anything useful with the returned dentry, 1871 * including using the 'd_inode' pointer. 1872 * 1873 * NOTE! We do a "raw" seqcount_begin here. That means that 1874 * we don't wait for the sequence count to stabilize if it 1875 * is in the middle of a sequence change. If we do the slow 1876 * dentry compare, we will do seqretries until it is stable, 1877 * and if we end up with a successful lookup, we actually 1878 * want to exit RCU lookup anyway. 1879 */ 1880 seq = raw_seqcount_begin(&dentry->d_seq); 1881 if (dentry->d_parent != parent) 1882 continue; 1883 if (d_unhashed(dentry)) 1884 continue; 1885 *seqp = seq; 1886 1887 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { 1888 if (dentry->d_name.hash != hashlen_hash(hashlen)) 1889 continue; 1890 switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) { 1891 case D_COMP_OK: 1892 return dentry; 1893 case D_COMP_NOMATCH: 1894 continue; 1895 default: 1896 goto seqretry; 1897 } 1898 } 1899 1900 if (dentry->d_name.hash_len != hashlen) 1901 continue; 1902 if (!dentry_cmp(dentry, str, hashlen_len(hashlen))) 1903 return dentry; 1904 } 1905 return NULL; 1906 } 1907 1908 /** 1909 * d_lookup - search for a dentry 1910 * @parent: parent dentry 1911 * @name: qstr of name we wish to find 1912 * Returns: dentry, or NULL 1913 * 1914 * d_lookup searches the children of the parent dentry for the name in 1915 * question. If the dentry is found its reference count is incremented and the 1916 * dentry is returned. The caller must use dput to free the entry when it has 1917 * finished using it. %NULL is returned if the dentry does not exist. 1918 */ 1919 struct dentry *d_lookup(struct dentry *parent, struct qstr *name) 1920 { 1921 struct dentry *dentry; 1922 unsigned seq; 1923 1924 do { 1925 seq = read_seqbegin(&rename_lock); 1926 dentry = __d_lookup(parent, name); 1927 if (dentry) 1928 break; 1929 } while (read_seqretry(&rename_lock, seq)); 1930 return dentry; 1931 } 1932 EXPORT_SYMBOL(d_lookup); 1933 1934 /** 1935 * __d_lookup - search for a dentry (racy) 1936 * @parent: parent dentry 1937 * @name: qstr of name we wish to find 1938 * Returns: dentry, or NULL 1939 * 1940 * __d_lookup is like d_lookup, however it may (rarely) return a 1941 * false-negative result due to unrelated rename activity. 1942 * 1943 * __d_lookup is slightly faster by avoiding rename_lock read seqlock, 1944 * however it must be used carefully, eg. with a following d_lookup in 1945 * the case of failure. 1946 * 1947 * __d_lookup callers must be commented. 
1948 */ 1949 struct dentry *__d_lookup(struct dentry *parent, struct qstr *name) 1950 { 1951 unsigned int len = name->len; 1952 unsigned int hash = name->hash; 1953 const unsigned char *str = name->name; 1954 struct hlist_bl_head *b = d_hash(parent, hash); 1955 struct hlist_bl_node *node; 1956 struct dentry *found = NULL; 1957 struct dentry *dentry; 1958 1959 /* 1960 * Note: There is significant duplication with __d_lookup_rcu which is 1961 * required to prevent single threaded performance regressions 1962 * especially on architectures where smp_rmb (in seqcounts) are costly. 1963 * Keep the two functions in sync. 1964 */ 1965 1966 /* 1967 * The hash list is protected using RCU. 1968 * 1969 * Take d_lock when comparing a candidate dentry, to avoid races 1970 * with d_move(). 1971 * 1972 * It is possible that concurrent renames can mess up our list 1973 * walk here and result in missing our dentry, resulting in the 1974 * false-negative result. d_lookup() protects against concurrent 1975 * renames using rename_lock seqlock. 1976 * 1977 * See Documentation/filesystems/path-lookup.txt for more details. 1978 */ 1979 rcu_read_lock(); 1980 1981 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 1982 1983 if (dentry->d_name.hash != hash) 1984 continue; 1985 1986 spin_lock(&dentry->d_lock); 1987 if (dentry->d_parent != parent) 1988 goto next; 1989 if (d_unhashed(dentry)) 1990 goto next; 1991 1992 /* 1993 * It is safe to compare names since d_move() cannot 1994 * change the qstr (protected by d_lock). 1995 */ 1996 if (parent->d_flags & DCACHE_OP_COMPARE) { 1997 int tlen = dentry->d_name.len; 1998 const char *tname = dentry->d_name.name; 1999 if (parent->d_op->d_compare(parent, parent->d_inode, 2000 dentry, dentry->d_inode, 2001 tlen, tname, name)) 2002 goto next; 2003 } else { 2004 if (dentry->d_name.len != len) 2005 goto next; 2006 if (dentry_cmp(dentry, str, len)) 2007 goto next; 2008 } 2009 2010 dentry->d_count++; 2011 found = dentry; 2012 spin_unlock(&dentry->d_lock); 2013 break; 2014 next: 2015 spin_unlock(&dentry->d_lock); 2016 } 2017 rcu_read_unlock(); 2018 2019 return found; 2020 } 2021 2022 /** 2023 * d_hash_and_lookup - hash the qstr then search for a dentry 2024 * @dir: Directory to search in 2025 * @name: qstr of name we wish to find 2026 * 2027 * On hash failure or on lookup failure NULL is returned. 2028 */ 2029 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) 2030 { 2031 struct dentry *dentry = NULL; 2032 2033 /* 2034 * Check for a fs-specific hash function. Note that we must 2035 * calculate the standard hash first, as the d_op->d_hash() 2036 * routine may choose to leave the hash value unchanged. 2037 */ 2038 name->hash = full_name_hash(name->name, name->len); 2039 if (dir->d_flags & DCACHE_OP_HASH) { 2040 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0) 2041 goto out; 2042 } 2043 dentry = d_lookup(dir, name); 2044 out: 2045 return dentry; 2046 } 2047 2048 /** 2049 * d_validate - verify dentry provided from insecure source (deprecated) 2050 * @dentry: The dentry alleged to be valid child of @dparent 2051 * @dparent: The parent dentry (known to be valid) 2052 * 2053 * An insecure source has sent us a dentry, here we verify it and dget() it. 2054 * This is used by ncpfs in its readdir implementation. 2055 * Zero is returned in the dentry is invalid. 2056 * 2057 * This function is slow for big directories, and deprecated, do not use it. 
2058 */ 2059 int d_validate(struct dentry *dentry, struct dentry *dparent) 2060 { 2061 struct dentry *child; 2062 2063 spin_lock(&dparent->d_lock); 2064 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { 2065 if (dentry == child) { 2066 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 2067 __dget_dlock(dentry); 2068 spin_unlock(&dentry->d_lock); 2069 spin_unlock(&dparent->d_lock); 2070 return 1; 2071 } 2072 } 2073 spin_unlock(&dparent->d_lock); 2074 2075 return 0; 2076 } 2077 EXPORT_SYMBOL(d_validate); 2078 2079 /* 2080 * When a file is deleted, we have two options: 2081 * - turn this dentry into a negative dentry 2082 * - unhash this dentry and free it. 2083 * 2084 * Usually, we want to just turn this into 2085 * a negative dentry, but if anybody else is 2086 * currently using the dentry or the inode 2087 * we can't do that and we fall back on removing 2088 * it from the hash queues and waiting for 2089 * it to be deleted later when it has no users 2090 */ 2091 2092 /** 2093 * d_delete - delete a dentry 2094 * @dentry: The dentry to delete 2095 * 2096 * Turn the dentry into a negative dentry if possible, otherwise 2097 * remove it from the hash queues so it can be deleted later 2098 */ 2099 2100 void d_delete(struct dentry * dentry) 2101 { 2102 struct inode *inode; 2103 int isdir = 0; 2104 /* 2105 * Are we the only user? 2106 */ 2107 again: 2108 spin_lock(&dentry->d_lock); 2109 inode = dentry->d_inode; 2110 isdir = S_ISDIR(inode->i_mode); 2111 if (dentry->d_count == 1) { 2112 if (inode && !spin_trylock(&inode->i_lock)) { 2113 spin_unlock(&dentry->d_lock); 2114 cpu_relax(); 2115 goto again; 2116 } 2117 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2118 dentry_unlink_inode(dentry); 2119 fsnotify_nameremove(dentry, isdir); 2120 return; 2121 } 2122 2123 if (!d_unhashed(dentry)) 2124 __d_drop(dentry); 2125 2126 spin_unlock(&dentry->d_lock); 2127 2128 fsnotify_nameremove(dentry, isdir); 2129 } 2130 EXPORT_SYMBOL(d_delete); 2131 2132 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2133 { 2134 BUG_ON(!d_unhashed(entry)); 2135 hlist_bl_lock(b); 2136 entry->d_flags |= DCACHE_RCUACCESS; 2137 hlist_bl_add_head_rcu(&entry->d_hash, b); 2138 hlist_bl_unlock(b); 2139 } 2140 2141 static void _d_rehash(struct dentry * entry) 2142 { 2143 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2144 } 2145 2146 /** 2147 * d_rehash - add an entry back to the hash 2148 * @entry: dentry to add to the hash 2149 * 2150 * Adds a dentry to the hash according to its name. 2151 */ 2152 2153 void d_rehash(struct dentry * entry) 2154 { 2155 spin_lock(&entry->d_lock); 2156 _d_rehash(entry); 2157 spin_unlock(&entry->d_lock); 2158 } 2159 EXPORT_SYMBOL(d_rehash); 2160 2161 /** 2162 * dentry_update_name_case - update case insensitive dentry with a new name 2163 * @dentry: dentry to be updated 2164 * @name: new name 2165 * 2166 * Update a case insensitive dentry with new case of name. 2167 * 2168 * dentry must have been returned by d_lookup with name @name. Old and new 2169 * name lengths must match (ie. no d_compare which allows mismatched name 2170 * lengths). 2171 * 2172 * Parent inode i_mutex must be held over d_lookup and into this call (to 2173 * keep renames and concurrent inserts, and readdir(2) away). 
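 *
 * A case-insensitive filesystem might use it along these lines
 * (illustrative sketch; "parent" and "ci_name" are hypothetical, and
 * the required i_mutex is held because the VFS calls ->lookup() with
 * the parent's i_mutex held):
 *
 *	found = d_hash_and_lookup(parent, &ci_name);
 *	if (found)
 *		dentry_update_name_case(found, &ci_name);
 *
 * d_hash_and_lookup() fills in ci_name.hash and goes through
 * d_lookup(), so the length-match requirement above is satisfied as
 * long as the filesystem's d_compare() only matches equal-length
 * names.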
2174 */ 2175 void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2176 { 2177 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); 2178 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2179 2180 spin_lock(&dentry->d_lock); 2181 write_seqcount_begin(&dentry->d_seq); 2182 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); 2183 write_seqcount_end(&dentry->d_seq); 2184 spin_unlock(&dentry->d_lock); 2185 } 2186 EXPORT_SYMBOL(dentry_update_name_case); 2187 2188 static void switch_names(struct dentry *dentry, struct dentry *target) 2189 { 2190 if (dname_external(target)) { 2191 if (dname_external(dentry)) { 2192 /* 2193 * Both external: swap the pointers 2194 */ 2195 swap(target->d_name.name, dentry->d_name.name); 2196 } else { 2197 /* 2198 * dentry:internal, target:external. Steal target's 2199 * storage and make target internal. 2200 */ 2201 memcpy(target->d_iname, dentry->d_name.name, 2202 dentry->d_name.len + 1); 2203 dentry->d_name.name = target->d_name.name; 2204 target->d_name.name = target->d_iname; 2205 } 2206 } else { 2207 if (dname_external(dentry)) { 2208 /* 2209 * dentry:external, target:internal. Give dentry's 2210 * storage to target and make dentry internal 2211 */ 2212 memcpy(dentry->d_iname, target->d_name.name, 2213 target->d_name.len + 1); 2214 target->d_name.name = dentry->d_name.name; 2215 dentry->d_name.name = dentry->d_iname; 2216 } else { 2217 /* 2218 * Both are internal. Just copy target to dentry 2219 */ 2220 memcpy(dentry->d_iname, target->d_name.name, 2221 target->d_name.len + 1); 2222 dentry->d_name.len = target->d_name.len; 2223 return; 2224 } 2225 } 2226 swap(dentry->d_name.len, target->d_name.len); 2227 } 2228 2229 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) 2230 { 2231 /* 2232 * XXXX: do we really need to take target->d_lock? 2233 */ 2234 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) 2235 spin_lock(&target->d_parent->d_lock); 2236 else { 2237 if (d_ancestor(dentry->d_parent, target->d_parent)) { 2238 spin_lock(&dentry->d_parent->d_lock); 2239 spin_lock_nested(&target->d_parent->d_lock, 2240 DENTRY_D_LOCK_NESTED); 2241 } else { 2242 spin_lock(&target->d_parent->d_lock); 2243 spin_lock_nested(&dentry->d_parent->d_lock, 2244 DENTRY_D_LOCK_NESTED); 2245 } 2246 } 2247 if (target < dentry) { 2248 spin_lock_nested(&target->d_lock, 2); 2249 spin_lock_nested(&dentry->d_lock, 3); 2250 } else { 2251 spin_lock_nested(&dentry->d_lock, 2); 2252 spin_lock_nested(&target->d_lock, 3); 2253 } 2254 } 2255 2256 static void dentry_unlock_parents_for_move(struct dentry *dentry, 2257 struct dentry *target) 2258 { 2259 if (target->d_parent != dentry->d_parent) 2260 spin_unlock(&dentry->d_parent->d_lock); 2261 if (target->d_parent != target) 2262 spin_unlock(&target->d_parent->d_lock); 2263 } 2264 2265 /* 2266 * When switching names, the actual string doesn't strictly have to 2267 * be preserved in the target - because we're dropping the target 2268 * anyway. As such, we can just do a simple memcpy() to copy over 2269 * the new name before we switch. 2270 * 2271 * Note that we have to be a lot more careful about getting the hash 2272 * switched - we have to switch the hash value properly even if it 2273 * then no longer matches the actual (corrupted) string of the target. 2274 * The hash value has to match the hash queue that the dentry is on.. 
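 *
 * Concretely (using the steps in __d_move() below): the dentry being
 * moved is first rehashed onto d_hash(target->d_parent,
 * target->d_name.hash), and only then are the names and hash values
 * swapped, so it ends up carrying target's hash value and sitting on
 * exactly the hash chain that value selects.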
2275 */ 2276 /* 2277 * __d_move - move a dentry 2278 * @dentry: entry to move 2279 * @target: new dentry 2280 * 2281 * Update the dcache to reflect the move of a file name. Negative 2282 * dcache entries should not be moved in this way. Caller must hold 2283 * rename_lock, the i_mutex of the source and target directories, 2284 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). 2285 */ 2286 static void __d_move(struct dentry * dentry, struct dentry * target) 2287 { 2288 if (!dentry->d_inode) 2289 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2290 2291 BUG_ON(d_ancestor(dentry, target)); 2292 BUG_ON(d_ancestor(target, dentry)); 2293 2294 dentry_lock_for_move(dentry, target); 2295 2296 write_seqcount_begin(&dentry->d_seq); 2297 write_seqcount_begin(&target->d_seq); 2298 2299 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ 2300 2301 /* 2302 * Move the dentry to the target hash queue. Don't bother checking 2303 * for the same hash queue because of how unlikely it is. 2304 */ 2305 __d_drop(dentry); 2306 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); 2307 2308 /* Unhash the target: dput() will then get rid of it */ 2309 __d_drop(target); 2310 2311 list_del(&dentry->d_u.d_child); 2312 list_del(&target->d_u.d_child); 2313 2314 /* Switch the names.. */ 2315 switch_names(dentry, target); 2316 swap(dentry->d_name.hash, target->d_name.hash); 2317 2318 /* ... and switch the parents */ 2319 if (IS_ROOT(dentry)) { 2320 dentry->d_parent = target->d_parent; 2321 target->d_parent = target; 2322 INIT_LIST_HEAD(&target->d_u.d_child); 2323 } else { 2324 swap(dentry->d_parent, target->d_parent); 2325 2326 /* And add them back to the (new) parent lists */ 2327 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs); 2328 } 2329 2330 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2331 2332 write_seqcount_end(&target->d_seq); 2333 write_seqcount_end(&dentry->d_seq); 2334 2335 dentry_unlock_parents_for_move(dentry, target); 2336 spin_unlock(&target->d_lock); 2337 fsnotify_d_move(dentry); 2338 spin_unlock(&dentry->d_lock); 2339 } 2340 2341 /* 2342 * d_move - move a dentry 2343 * @dentry: entry to move 2344 * @target: new dentry 2345 * 2346 * Update the dcache to reflect the move of a file name. Negative 2347 * dcache entries should not be moved in this way. See the locking 2348 * requirements for __d_move. 2349 */ 2350 void d_move(struct dentry *dentry, struct dentry *target) 2351 { 2352 write_seqlock(&rename_lock); 2353 __d_move(dentry, target); 2354 write_sequnlock(&rename_lock); 2355 } 2356 EXPORT_SYMBOL(d_move); 2357 2358 /** 2359 * d_ancestor - search for an ancestor 2360 * @p1: ancestor dentry 2361 * @p2: child dentry 2362 * 2363 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2364 * an ancestor of p2, else NULL. 2365 */ 2366 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2367 { 2368 struct dentry *p; 2369 2370 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2371 if (p->d_parent == p1) 2372 return p; 2373 } 2374 return NULL; 2375 } 2376 2377 /* 2378 * This helper attempts to cope with remotely renamed directories 2379 * 2380 * It assumes that the caller is already holding 2381 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2382 * 2383 * Note: If ever the locking in lock_rename() changes, then please 2384 * remember to update this too... 
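 *
 * For reference, the ordering being mirrored is roughly (a sketch of
 * the lock_rename() protocol, not a verbatim copy):
 *
 *	mutex_lock(&sb->s_vfs_rename_mutex);	only if the parents differ
 *	mutex_lock(&parent1->d_inode->i_mutex);	ancestor first, if related
 *	mutex_lock(&parent2->d_inode->i_mutex);
 *
 * Because the caller of __d_unalias() already holds one parent's
 * i_mutex (and inode->i_lock), the helper can only trylock the
 * remaining locks and must give up with -EBUSY rather than block.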
2385 */ 2386 static struct dentry *__d_unalias(struct inode *inode, 2387 struct dentry *dentry, struct dentry *alias) 2388 { 2389 struct mutex *m1 = NULL, *m2 = NULL; 2390 struct dentry *ret = ERR_PTR(-EBUSY); 2391 2392 /* If alias and dentry share a parent, then no extra locks required */ 2393 if (alias->d_parent == dentry->d_parent) 2394 goto out_unalias; 2395 2396 /* See lock_rename() */ 2397 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) 2398 goto out_err; 2399 m1 = &dentry->d_sb->s_vfs_rename_mutex; 2400 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) 2401 goto out_err; 2402 m2 = &alias->d_parent->d_inode->i_mutex; 2403 out_unalias: 2404 if (likely(!d_mountpoint(alias))) { 2405 __d_move(alias, dentry); 2406 ret = alias; 2407 } 2408 out_err: 2409 spin_unlock(&inode->i_lock); 2410 if (m2) 2411 mutex_unlock(m2); 2412 if (m1) 2413 mutex_unlock(m1); 2414 return ret; 2415 } 2416 2417 /* 2418 * Prepare an anonymous dentry for life in the superblock's dentry tree as a 2419 * named dentry in place of the dentry to be replaced. 2420 * returns with anon->d_lock held! 2421 */ 2422 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) 2423 { 2424 struct dentry *dparent, *aparent; 2425 2426 dentry_lock_for_move(anon, dentry); 2427 2428 write_seqcount_begin(&dentry->d_seq); 2429 write_seqcount_begin(&anon->d_seq); 2430 2431 dparent = dentry->d_parent; 2432 aparent = anon->d_parent; 2433 2434 switch_names(dentry, anon); 2435 swap(dentry->d_name.hash, anon->d_name.hash); 2436 2437 dentry->d_parent = (aparent == anon) ? dentry : aparent; 2438 list_del(&dentry->d_u.d_child); 2439 if (!IS_ROOT(dentry)) 2440 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2441 else 2442 INIT_LIST_HEAD(&dentry->d_u.d_child); 2443 2444 anon->d_parent = (dparent == dentry) ? anon : dparent; 2445 list_del(&anon->d_u.d_child); 2446 if (!IS_ROOT(anon)) 2447 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs); 2448 else 2449 INIT_LIST_HEAD(&anon->d_u.d_child); 2450 2451 write_seqcount_end(&dentry->d_seq); 2452 write_seqcount_end(&anon->d_seq); 2453 2454 dentry_unlock_parents_for_move(anon, dentry); 2455 spin_unlock(&dentry->d_lock); 2456 2457 /* anon->d_lock still locked, returns locked */ 2458 anon->d_flags &= ~DCACHE_DISCONNECTED; 2459 } 2460 2461 /** 2462 * d_materialise_unique - introduce an inode into the tree 2463 * @dentry: candidate dentry 2464 * @inode: inode to bind to the dentry, to which aliases may be attached 2465 * 2466 * Introduces an dentry into the tree, substituting an extant disconnected 2467 * root directory alias in its place if there is one. Caller must hold the 2468 * i_mutex of the parent directory. 2469 */ 2470 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) 2471 { 2472 struct dentry *actual; 2473 2474 BUG_ON(!d_unhashed(dentry)); 2475 2476 if (!inode) { 2477 actual = dentry; 2478 __d_instantiate(dentry, NULL); 2479 d_rehash(actual); 2480 goto out_nolock; 2481 } 2482 2483 spin_lock(&inode->i_lock); 2484 2485 if (S_ISDIR(inode->i_mode)) { 2486 struct dentry *alias; 2487 2488 /* Does an aliased dentry already exist? */ 2489 alias = __d_find_alias(inode, 0); 2490 if (alias) { 2491 actual = alias; 2492 write_seqlock(&rename_lock); 2493 2494 if (d_ancestor(alias, dentry)) { 2495 /* Check for loops */ 2496 actual = ERR_PTR(-ELOOP); 2497 spin_unlock(&inode->i_lock); 2498 } else if (IS_ROOT(alias)) { 2499 /* Is this an anonymous mountpoint that we 2500 * could splice into our tree? 
*/ 2501 __d_materialise_dentry(dentry, alias); 2502 write_sequnlock(&rename_lock); 2503 __d_drop(alias); 2504 goto found; 2505 } else { 2506 /* Nope, but we must(!) avoid directory 2507 * aliasing. This drops inode->i_lock */ 2508 actual = __d_unalias(inode, dentry, alias); 2509 } 2510 write_sequnlock(&rename_lock); 2511 if (IS_ERR(actual)) { 2512 if (PTR_ERR(actual) == -ELOOP) 2513 pr_warn_ratelimited( 2514 "VFS: Lookup of '%s' in %s %s" 2515 " would have caused loop\n", 2516 dentry->d_name.name, 2517 inode->i_sb->s_type->name, 2518 inode->i_sb->s_id); 2519 dput(alias); 2520 } 2521 goto out_nolock; 2522 } 2523 } 2524 2525 /* Add a unique reference */ 2526 actual = __d_instantiate_unique(dentry, inode); 2527 if (!actual) 2528 actual = dentry; 2529 else 2530 BUG_ON(!d_unhashed(actual)); 2531 2532 spin_lock(&actual->d_lock); 2533 found: 2534 _d_rehash(actual); 2535 spin_unlock(&actual->d_lock); 2536 spin_unlock(&inode->i_lock); 2537 out_nolock: 2538 if (actual == dentry) { 2539 security_d_instantiate(dentry, inode); 2540 return NULL; 2541 } 2542 2543 iput(inode); 2544 return actual; 2545 } 2546 EXPORT_SYMBOL_GPL(d_materialise_unique); 2547 2548 static int prepend(char **buffer, int *buflen, const char *str, int namelen) 2549 { 2550 *buflen -= namelen; 2551 if (*buflen < 0) 2552 return -ENAMETOOLONG; 2553 *buffer -= namelen; 2554 memcpy(*buffer, str, namelen); 2555 return 0; 2556 } 2557 2558 static int prepend_name(char **buffer, int *buflen, struct qstr *name) 2559 { 2560 return prepend(buffer, buflen, name->name, name->len); 2561 } 2562 2563 /** 2564 * prepend_path - Prepend path string to a buffer 2565 * @path: the dentry/vfsmount to report 2566 * @root: root vfsmnt/dentry 2567 * @buffer: pointer to the end of the buffer 2568 * @buflen: pointer to buffer length 2569 * 2570 * Caller holds the rename_lock. 2571 */ 2572 static int prepend_path(const struct path *path, 2573 const struct path *root, 2574 char **buffer, int *buflen) 2575 { 2576 struct dentry *dentry = path->dentry; 2577 struct vfsmount *vfsmnt = path->mnt; 2578 struct mount *mnt = real_mount(vfsmnt); 2579 bool slash = false; 2580 int error = 0; 2581 2582 br_read_lock(&vfsmount_lock); 2583 while (dentry != root->dentry || vfsmnt != root->mnt) { 2584 struct dentry * parent; 2585 2586 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 2587 /* Global root? */ 2588 if (!mnt_has_parent(mnt)) 2589 goto global_root; 2590 dentry = mnt->mnt_mountpoint; 2591 mnt = mnt->mnt_parent; 2592 vfsmnt = &mnt->mnt; 2593 continue; 2594 } 2595 parent = dentry->d_parent; 2596 prefetch(parent); 2597 spin_lock(&dentry->d_lock); 2598 error = prepend_name(buffer, buflen, &dentry->d_name); 2599 spin_unlock(&dentry->d_lock); 2600 if (!error) 2601 error = prepend(buffer, buflen, "/", 1); 2602 if (error) 2603 break; 2604 2605 slash = true; 2606 dentry = parent; 2607 } 2608 2609 if (!error && !slash) 2610 error = prepend(buffer, buflen, "/", 1); 2611 2612 out: 2613 br_read_unlock(&vfsmount_lock); 2614 return error; 2615 2616 global_root: 2617 /* 2618 * Filesystems needing to implement special "root names" 2619 * should do so with ->d_dname() 2620 */ 2621 if (IS_ROOT(dentry) && 2622 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) { 2623 WARN(1, "Root dentry has weird name <%.*s>\n", 2624 (int) dentry->d_name.len, dentry->d_name.name); 2625 } 2626 if (!slash) 2627 error = prepend(buffer, buflen, "/", 1); 2628 if (!error) 2629 error = is_mounted(vfsmnt) ? 
1 : 2; 2630 goto out; 2631 } 2632 2633 /** 2634 * __d_path - return the path of a dentry 2635 * @path: the dentry/vfsmount to report 2636 * @root: root vfsmnt/dentry 2637 * @buf: buffer to return value in 2638 * @buflen: buffer length 2639 * 2640 * Convert a dentry into an ASCII path name. 2641 * 2642 * Returns a pointer into the buffer or an error code if the 2643 * path was too long. 2644 * 2645 * "buflen" should be positive. 2646 * 2647 * If the path is not reachable from the supplied root, return %NULL. 2648 */ 2649 char *__d_path(const struct path *path, 2650 const struct path *root, 2651 char *buf, int buflen) 2652 { 2653 char *res = buf + buflen; 2654 int error; 2655 2656 prepend(&res, &buflen, "\0", 1); 2657 write_seqlock(&rename_lock); 2658 error = prepend_path(path, root, &res, &buflen); 2659 write_sequnlock(&rename_lock); 2660 2661 if (error < 0) 2662 return ERR_PTR(error); 2663 if (error > 0) 2664 return NULL; 2665 return res; 2666 } 2667 2668 char *d_absolute_path(const struct path *path, 2669 char *buf, int buflen) 2670 { 2671 struct path root = {}; 2672 char *res = buf + buflen; 2673 int error; 2674 2675 prepend(&res, &buflen, "\0", 1); 2676 write_seqlock(&rename_lock); 2677 error = prepend_path(path, &root, &res, &buflen); 2678 write_sequnlock(&rename_lock); 2679 2680 if (error > 1) 2681 error = -EINVAL; 2682 if (error < 0) 2683 return ERR_PTR(error); 2684 return res; 2685 } 2686 2687 /* 2688 * same as __d_path but appends "(deleted)" for unlinked files. 2689 */ 2690 static int path_with_deleted(const struct path *path, 2691 const struct path *root, 2692 char **buf, int *buflen) 2693 { 2694 prepend(buf, buflen, "\0", 1); 2695 if (d_unlinked(path->dentry)) { 2696 int error = prepend(buf, buflen, " (deleted)", 10); 2697 if (error) 2698 return error; 2699 } 2700 2701 return prepend_path(path, root, buf, buflen); 2702 } 2703 2704 static int prepend_unreachable(char **buffer, int *buflen) 2705 { 2706 return prepend(buffer, buflen, "(unreachable)", 13); 2707 } 2708 2709 /** 2710 * d_path - return the path of a dentry 2711 * @path: path to report 2712 * @buf: buffer to return value in 2713 * @buflen: buffer length 2714 * 2715 * Convert a dentry into an ASCII path name. If the entry has been deleted 2716 * the string " (deleted)" is appended. Note that this is ambiguous. 2717 * 2718 * Returns a pointer into the buffer or an error code if the path was 2719 * too long. Note: Callers should use the returned pointer, not the passed 2720 * in buffer, to use the name! The implementation often starts at an offset 2721 * into the buffer, and may leave 0 bytes at the start. 2722 * 2723 * "buflen" should be positive. 2724 */ 2725 char *d_path(const struct path *path, char *buf, int buflen) 2726 { 2727 char *res = buf + buflen; 2728 struct path root; 2729 int error; 2730 2731 /* 2732 * We have various synthetic filesystems that never get mounted. On 2733 * these filesystems dentries are never used for lookup purposes, and 2734 * thus don't need to be hashed. They also don't need a name until a 2735 * user wants to identify the object in /proc/pid/fd/. 
The little hack 2736 * below allows us to generate a name for these objects on demand: 2737 */ 2738 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2739 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2740 2741 get_fs_root(current->fs, &root); 2742 write_seqlock(&rename_lock); 2743 error = path_with_deleted(path, &root, &res, &buflen); 2744 if (error < 0) 2745 res = ERR_PTR(error); 2746 write_sequnlock(&rename_lock); 2747 path_put(&root); 2748 return res; 2749 } 2750 EXPORT_SYMBOL(d_path); 2751 2752 /** 2753 * d_path_with_unreachable - return the path of a dentry 2754 * @path: path to report 2755 * @buf: buffer to return value in 2756 * @buflen: buffer length 2757 * 2758 * The difference from d_path() is that this prepends "(unreachable)" 2759 * to paths which are unreachable from the current process' root. 2760 */ 2761 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) 2762 { 2763 char *res = buf + buflen; 2764 struct path root; 2765 int error; 2766 2767 if (path->dentry->d_op && path->dentry->d_op->d_dname) 2768 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 2769 2770 get_fs_root(current->fs, &root); 2771 write_seqlock(&rename_lock); 2772 error = path_with_deleted(path, &root, &res, &buflen); 2773 if (error > 0) 2774 error = prepend_unreachable(&res, &buflen); 2775 write_sequnlock(&rename_lock); 2776 path_put(&root); 2777 if (error) 2778 res = ERR_PTR(error); 2779 2780 return res; 2781 } 2782 2783 /* 2784 * Helper function for dentry_operations.d_dname() members 2785 */ 2786 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 2787 const char *fmt, ...) 2788 { 2789 va_list args; 2790 char temp[64]; 2791 int sz; 2792 2793 va_start(args, fmt); 2794 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 2795 va_end(args); 2796 2797 if (sz > sizeof(temp) || sz > buflen) 2798 return ERR_PTR(-ENAMETOOLONG); 2799 2800 buffer += buflen - sz; 2801 return memcpy(buffer, temp, sz); 2802 } 2803 2804 /* 2805 * Write full pathname from the root of the filesystem into the buffer. 
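 *
 * As with d_path(), callers must use the returned pointer rather than
 * the start of the buffer, because the name is assembled from the end
 * of the buffer backwards.  A typical caller of the dentry_path_raw()
 * wrapper below looks roughly like this (illustrative sketch;
 * "report()" is a hypothetical consumer):
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	char *p;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	p = dentry_path_raw(dentry, page, PAGE_SIZE);
 *	if (!IS_ERR(p))
 *		report(p);
 *	free_page((unsigned long)page);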
2806 */ 2807 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen) 2808 { 2809 char *end = buf + buflen; 2810 char *retval; 2811 2812 prepend(&end, &buflen, "\0", 1); 2813 if (buflen < 1) 2814 goto Elong; 2815 /* Get '/' right */ 2816 retval = end-1; 2817 *retval = '/'; 2818 2819 while (!IS_ROOT(dentry)) { 2820 struct dentry *parent = dentry->d_parent; 2821 int error; 2822 2823 prefetch(parent); 2824 spin_lock(&dentry->d_lock); 2825 error = prepend_name(&end, &buflen, &dentry->d_name); 2826 spin_unlock(&dentry->d_lock); 2827 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0) 2828 goto Elong; 2829 2830 retval = end; 2831 dentry = parent; 2832 } 2833 return retval; 2834 Elong: 2835 return ERR_PTR(-ENAMETOOLONG); 2836 } 2837 2838 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) 2839 { 2840 char *retval; 2841 2842 write_seqlock(&rename_lock); 2843 retval = __dentry_path(dentry, buf, buflen); 2844 write_sequnlock(&rename_lock); 2845 2846 return retval; 2847 } 2848 EXPORT_SYMBOL(dentry_path_raw); 2849 2850 char *dentry_path(struct dentry *dentry, char *buf, int buflen) 2851 { 2852 char *p = NULL; 2853 char *retval; 2854 2855 write_seqlock(&rename_lock); 2856 if (d_unlinked(dentry)) { 2857 p = buf + buflen; 2858 if (prepend(&p, &buflen, "//deleted", 10) != 0) 2859 goto Elong; 2860 buflen++; 2861 } 2862 retval = __dentry_path(dentry, buf, buflen); 2863 write_sequnlock(&rename_lock); 2864 if (!IS_ERR(retval) && p) 2865 *p = '/'; /* restore '/' overriden with '\0' */ 2866 return retval; 2867 Elong: 2868 return ERR_PTR(-ENAMETOOLONG); 2869 } 2870 2871 /* 2872 * NOTE! The user-level library version returns a 2873 * character pointer. The kernel system call just 2874 * returns the length of the buffer filled (which 2875 * includes the ending '\0' character), or a negative 2876 * error value. So libc would do something like 2877 * 2878 * char *getcwd(char * buf, size_t size) 2879 * { 2880 * int retval; 2881 * 2882 * retval = sys_getcwd(buf, size); 2883 * if (retval >= 0) 2884 * return buf; 2885 * errno = -retval; 2886 * return NULL; 2887 * } 2888 */ 2889 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) 2890 { 2891 int error; 2892 struct path pwd, root; 2893 char *page = (char *) __get_free_page(GFP_USER); 2894 2895 if (!page) 2896 return -ENOMEM; 2897 2898 get_fs_root_and_pwd(current->fs, &root, &pwd); 2899 2900 error = -ENOENT; 2901 write_seqlock(&rename_lock); 2902 if (!d_unlinked(pwd.dentry)) { 2903 unsigned long len; 2904 char *cwd = page + PAGE_SIZE; 2905 int buflen = PAGE_SIZE; 2906 2907 prepend(&cwd, &buflen, "\0", 1); 2908 error = prepend_path(&pwd, &root, &cwd, &buflen); 2909 write_sequnlock(&rename_lock); 2910 2911 if (error < 0) 2912 goto out; 2913 2914 /* Unreachable from current root */ 2915 if (error > 0) { 2916 error = prepend_unreachable(&cwd, &buflen); 2917 if (error) 2918 goto out; 2919 } 2920 2921 error = -ERANGE; 2922 len = PAGE_SIZE + page - cwd; 2923 if (len <= size) { 2924 error = len; 2925 if (copy_to_user(buf, cwd, len)) 2926 error = -EFAULT; 2927 } 2928 } else { 2929 write_sequnlock(&rename_lock); 2930 } 2931 2932 out: 2933 path_put(&pwd); 2934 path_put(&root); 2935 free_page((unsigned long) page); 2936 return error; 2937 } 2938 2939 /* 2940 * Test whether new_dentry is a subdirectory of old_dentry. 
2941 * 2942 * Trivially implemented using the dcache structure 2943 */ 2944 2945 /** 2946 * is_subdir - is new dentry a subdirectory of old_dentry 2947 * @new_dentry: new dentry 2948 * @old_dentry: old dentry 2949 * 2950 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth). 2951 * Returns 0 otherwise. 2952 * Caller must ensure that "new_dentry" is pinned before calling is_subdir() 2953 */ 2954 2955 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 2956 { 2957 int result; 2958 unsigned seq; 2959 2960 if (new_dentry == old_dentry) 2961 return 1; 2962 2963 do { 2964 /* for restarting inner loop in case of seq retry */ 2965 seq = read_seqbegin(&rename_lock); 2966 /* 2967 * Need rcu_readlock to protect against the d_parent trashing 2968 * due to d_move 2969 */ 2970 rcu_read_lock(); 2971 if (d_ancestor(old_dentry, new_dentry)) 2972 result = 1; 2973 else 2974 result = 0; 2975 rcu_read_unlock(); 2976 } while (read_seqretry(&rename_lock, seq)); 2977 2978 return result; 2979 } 2980 2981 void d_genocide(struct dentry *root) 2982 { 2983 struct dentry *this_parent; 2984 struct list_head *next; 2985 unsigned seq; 2986 int locked = 0; 2987 2988 seq = read_seqbegin(&rename_lock); 2989 again: 2990 this_parent = root; 2991 spin_lock(&this_parent->d_lock); 2992 repeat: 2993 next = this_parent->d_subdirs.next; 2994 resume: 2995 while (next != &this_parent->d_subdirs) { 2996 struct list_head *tmp = next; 2997 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 2998 next = tmp->next; 2999 3000 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 3001 if (d_unhashed(dentry) || !dentry->d_inode) { 3002 spin_unlock(&dentry->d_lock); 3003 continue; 3004 } 3005 if (!list_empty(&dentry->d_subdirs)) { 3006 spin_unlock(&this_parent->d_lock); 3007 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 3008 this_parent = dentry; 3009 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 3010 goto repeat; 3011 } 3012 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 3013 dentry->d_flags |= DCACHE_GENOCIDE; 3014 dentry->d_count--; 3015 } 3016 spin_unlock(&dentry->d_lock); 3017 } 3018 if (this_parent != root) { 3019 struct dentry *child = this_parent; 3020 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 3021 this_parent->d_flags |= DCACHE_GENOCIDE; 3022 this_parent->d_count--; 3023 } 3024 this_parent = try_to_ascend(this_parent, locked, seq); 3025 if (!this_parent) 3026 goto rename_retry; 3027 next = child->d_u.d_child.next; 3028 goto resume; 3029 } 3030 spin_unlock(&this_parent->d_lock); 3031 if (!locked && read_seqretry(&rename_lock, seq)) 3032 goto rename_retry; 3033 if (locked) 3034 write_sequnlock(&rename_lock); 3035 return; 3036 3037 rename_retry: 3038 locked = 1; 3039 write_seqlock(&rename_lock); 3040 goto again; 3041 } 3042 3043 /** 3044 * find_inode_number - check for dentry with name 3045 * @dir: directory to check 3046 * @name: Name to find. 3047 * 3048 * Check whether a dentry already exists for the given name, 3049 * and return the inode number if it has an inode. Otherwise 3050 * 0 is returned. 3051 * 3052 * This routine is used to post-process directory listings for 3053 * filesystems using synthetic inode numbers, and is necessary 3054 * to keep getcwd() working. 
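 *
 * A filesystem's readdir/filldir code might use it roughly like this
 * (illustrative sketch; "de_name", "de_len" and "fake_ino" are
 * hypothetical values taken from the on-disk directory entry):
 *
 *	struct qstr qname;
 *	ino_t ino;
 *
 *	qname.name = de_name;
 *	qname.len = de_len;
 *	ino = find_inode_number(dir, &qname);
 *	if (!ino)
 *		ino = fake_ino;
 *
 * Names that are already in the dcache then report the same inode
 * number that a stat(2) on them would return, while everything else
 * falls back to the synthetic number.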
3055 */ 3056 3057 ino_t find_inode_number(struct dentry *dir, struct qstr *name) 3058 { 3059 struct dentry * dentry; 3060 ino_t ino = 0; 3061 3062 dentry = d_hash_and_lookup(dir, name); 3063 if (dentry) { 3064 if (dentry->d_inode) 3065 ino = dentry->d_inode->i_ino; 3066 dput(dentry); 3067 } 3068 return ino; 3069 } 3070 EXPORT_SYMBOL(find_inode_number); 3071 3072 static __initdata unsigned long dhash_entries; 3073 static int __init set_dhash_entries(char *str) 3074 { 3075 if (!str) 3076 return 0; 3077 dhash_entries = simple_strtoul(str, &str, 0); 3078 return 1; 3079 } 3080 __setup("dhash_entries=", set_dhash_entries); 3081 3082 static void __init dcache_init_early(void) 3083 { 3084 unsigned int loop; 3085 3086 /* If hashes are distributed across NUMA nodes, defer 3087 * hash allocation until vmalloc space is available. 3088 */ 3089 if (hashdist) 3090 return; 3091 3092 dentry_hashtable = 3093 alloc_large_system_hash("Dentry cache", 3094 sizeof(struct hlist_bl_head), 3095 dhash_entries, 3096 13, 3097 HASH_EARLY, 3098 &d_hash_shift, 3099 &d_hash_mask, 3100 0, 3101 0); 3102 3103 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3104 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3105 } 3106 3107 static void __init dcache_init(void) 3108 { 3109 unsigned int loop; 3110 3111 /* 3112 * A constructor could be added for stable state like the lists, 3113 * but it is probably not worth it because of the cache nature 3114 * of the dcache. 3115 */ 3116 dentry_cache = KMEM_CACHE(dentry, 3117 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3118 3119 /* Hash may have been set up in dcache_init_early */ 3120 if (!hashdist) 3121 return; 3122 3123 dentry_hashtable = 3124 alloc_large_system_hash("Dentry cache", 3125 sizeof(struct hlist_bl_head), 3126 dhash_entries, 3127 13, 3128 0, 3129 &d_hash_shift, 3130 &d_hash_mask, 3131 0, 3132 0); 3133 3134 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3135 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3136 } 3137 3138 /* SLAB cache for __getname() consumers */ 3139 struct kmem_cache *names_cachep __read_mostly; 3140 EXPORT_SYMBOL(names_cachep); 3141 3142 EXPORT_SYMBOL(d_genocide); 3143 3144 void __init vfs_caches_init_early(void) 3145 { 3146 dcache_init_early(); 3147 inode_init_early(); 3148 } 3149 3150 void __init vfs_caches_init(unsigned long mempages) 3151 { 3152 unsigned long reserve; 3153 3154 /* Base hash sizes on available memory, with a reserve equal to 3155 150% of current kernel size */ 3156 3157 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3158 mempages -= reserve; 3159 3160 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3161 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3162 3163 dcache_init(); 3164 inode_init(); 3165 files_init(mempages); 3166 mnt_init(); 3167 bdev_cache_init(); 3168 chrdev_init(); 3169 } 3170