/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dentry->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);
}

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
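/*
 * Illustrative note (editorial addition, not from the original source):
 * on a little-endian 64-bit machine, bytemask_from_count(tcount) keeps
 * only the low 'tcount' bytes of the final partial word, e.g.
 *
 *	bytemask_from_count(3) == 0x0000000000ffffff
 *
 * so the (a ^ b) & mask test in the word-at-a-time variant above compares
 * exactly the remaining bytes of the two names and ignores whatever
 * happens to follow them in memory.
 */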
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static void dentry_free(struct dentry *dentry)
{
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		hlist_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	__d_clear_type(dentry);
	dentry->d_inode = NULL;
	hlist_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_del_init(&dentry->d_lru);
}

static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_move_tail(&dentry->d_lru, list);
}

/*
 * dentry_lru_(add|del) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}
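/*
 * Summary of the d_lru states maintained by the helpers above (editorial
 * addition, derived from the invariants D_FLAG_VERIFY enforces):
 *
 *	neither flag set		d_lru unused, not on any list
 *	DCACHE_LRU_LIST			on the superblock LRU list
 *	LRU_LIST | SHRINK_LIST		on a private shrink list
 *
 * d_shrink_del() and d_lru_isolate() return a dentry to the "neither"
 * state; DCACHE_SHRINK_LIST never appears without DCACHE_LRU_LIST.
 */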
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform d_walk() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	BUG_ON((int)dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	cpu_relax();
	return dentry; /* try again with same dentry */
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely((int)dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	if (lockref_put_or_lock(&dentry->d_lockref))
		return;

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
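/*
 * Example (illustrative only, not part of the original file): a typical
 * caller pins an alias, uses it, and drops the reference it was handed:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		... use alias; it cannot be freed under us ...
 *		dput(alias);
 *	}
 */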
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			/*
			 * inform the fs via d_prune that this dentry
			 * is about to be unhashed and destroyed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if ((int)dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}

static enum lru_status
dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack-provided lists after they are
		 * properly isolated from the main list. It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan : number of entries to try to free
 * @nid: which node to scan for freeable entities
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
				   &dispose, &nr_to_scan);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
						 spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry *dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		rcu_read_lock();
		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/*
		 * might go back up the wrong parent if we have had a rename
		 * or deletion
		 */
		if (this_parent != child->d_parent ||
		    (child->d_flags & DCACHE_DENTRY_KILLED) ||
		    need_seqretry(&rename_lock, seq)) {
			spin_unlock(&this_parent->d_lock);
			rcu_read_unlock();
			goto rename_retry;
		}
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq)) {
		spin_unlock(&this_parent->d_lock);
		goto rename_retry;
	}
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	if (!retry)
		return;
	seq = 1;
	goto again;
}
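/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * d_walk() user that counts every dentry below "root" could look like
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *dentry)
 *	{
 *		(*(long *)data)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	long n = 0;
 *	d_walk(root, &n, count_one, NULL);
 *
 * check_mount() below is a real in-tree callback of the same shape.
 */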
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
	       dentry,
	       dentry->d_inode ? dentry->d_inode->i_ino : 0UL,
	       dentry,
	       dentry->d_lockref.count,
	       dentry->d_sb->s_type->name,
	       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};

static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * no dcache lock.
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted. This
 * ensures there are no unhashed dentries on the path to a mountpoint.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}

		if (!data.mountpoint && !data.select.found)
			break;

		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
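/*
 * Example (illustrative, hypothetical filesystem code): a ->lookup()
 * style path usually allocates a child with d_alloc() and later attaches
 * an inode, e.g. via d_add() or d_splice_alias():
 *
 *	struct dentry *child = d_alloc(dir_dentry, &qname);
 *	if (!child)
 *		return ERR_PTR(-ENOMEM);
 *	d_add(child, inode);	/* hashes child and consumes the inode ref */
 */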
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
}
EXPORT_SYMBOL(d_set_d_op);

static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_FILE_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->follow_link))
			add_flags = DCACHE_SYMLINK_TYPE;
		else
			inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);

	spin_lock(&dentry->d_lock);
	__d_set_type(dentry, add_flags);
	if (inode)
		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (alias->d_name.len != len)
			continue;
		if (dentry_cmp(alias, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
EXPORT_SYMBOL(d_instantiate_unique);
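/*
 * Example (illustrative, hypothetical helper names): a typical ->create()
 * implementation ends by binding the new inode to the dentry:
 *
 *	inode = foo_new_inode(dir, mode);	/* hypothetical fs helper */
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_instantiate(dentry, inode);		/* consumes the inode reference */
 *	return 0;
 */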
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));

	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = QSTR_INIT("/", 1);

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);

static struct dentry *__d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);

static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	static const struct qstr anonstring = QSTR_INIT("/", 1);
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= add_flags;
	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}

/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry. If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);
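/*
 * Example (illustrative, hypothetical helper names): an exportfs
 * ->fh_to_dentry() commonly funnels straight into d_obtain_alias(),
 * relying on it to absorb %NULL and IS_ERR inodes:
 *
 *	static struct dentry *foo_fh_to_dentry(...)
 *	{
 *		struct inode *inode = foo_nfs_get_inode(sb, ino, gen);
 *		return d_obtain_alias(inode);
 *	}
 */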
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (unlikely(IS_ERR(found)))
		goto err_out;
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			found = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return found;
}
EXPORT_SYMBOL(d_add_ci);

/*
 * Do the slow-case of the dentry name compare.
 *
 * Unlike the dentry_cmp() function, we need to atomically
 * load the name and length information, so that the
 * filesystem can rely on them, and can use the 'name' and
 * 'len' information without worrying about walking off the
 * end of memory etc.
 *
 * Thus the read_seqcount_retry() and the "duplicate" info
 * in arguments (the low-level filesystem should not look
 * at the dentry inode or name contents directly, since
 * rename can change them while we're in RCU mode).
 */
enum slow_d_compare {
	D_COMP_OK,
	D_COMP_NOMATCH,
	D_COMP_SEQRETRY,
};

static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		unsigned int seq,
		const struct qstr *name)
{
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		cpu_relax();
		return D_COMP_SEQRETRY;
	}
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
	return D_COMP_OK;
}

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be used
 * without taking d_lock and checking its d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
2009 */
2010 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2011 				const struct qstr *name,
2012 				unsigned *seqp)
2013 {
2014 	u64 hashlen = name->hash_len;
2015 	const unsigned char *str = name->name;
2016 	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2017 	struct hlist_bl_node *node;
2018 	struct dentry *dentry;
2019
2020 	/*
2021 	 * Note: There is significant duplication with __d_lookup which is
2022 	 * required to prevent single-threaded performance regressions
2023 	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2024 	 * Keep the two functions in sync.
2025 	 */
2026
2027 	/*
2028 	 * The hash list is protected using RCU.
2029 	 *
2030 	 * Carefully use d_seq when comparing a candidate dentry, to avoid
2031 	 * races with d_move().
2032 	 *
2033 	 * It is possible that concurrent renames can mess up our list
2034 	 * walk here and make us miss our dentry, giving a false-negative
2035 	 * result. d_lookup() protects against concurrent renames using the
2036 	 * rename_lock seqlock.
2037 	 *
2038 	 * See Documentation/filesystems/path-lookup.txt for more details.
2039 	 */
2040 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2041 		unsigned seq;
2042
2043 seqretry:
2044 		/*
2045 		 * The dentry sequence count protects us from concurrent
2046 		 * renames, and thus protects parent and name fields.
2047 		 *
2048 		 * The caller must perform a seqcount check in order
2049 		 * to do anything useful with the returned dentry.
2050 		 *
2051 		 * NOTE! We do a "raw" seqcount_begin here. That means that
2052 		 * we don't wait for the sequence count to stabilize if it
2053 		 * is in the middle of a sequence change. If we do the slow
2054 		 * dentry compare, we will do seqretries until it is stable,
2055 		 * and if we end up with a successful lookup, we actually
2056 		 * want to exit RCU lookup anyway.
2057 		 */
2058 		seq = raw_seqcount_begin(&dentry->d_seq);
2059 		if (dentry->d_parent != parent)
2060 			continue;
2061 		if (d_unhashed(dentry))
2062 			continue;
2063
2064 		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2065 			if (dentry->d_name.hash != hashlen_hash(hashlen))
2066 				continue;
2067 			*seqp = seq;
2068 			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2069 			case D_COMP_OK:
2070 				return dentry;
2071 			case D_COMP_NOMATCH:
2072 				continue;
2073 			default:
2074 				goto seqretry;
2075 			}
2076 		}
2077
2078 		if (dentry->d_name.hash_len != hashlen)
2079 			continue;
2080 		*seqp = seq;
2081 		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2082 			return dentry;
2083 	}
2084 	return NULL;
2085 }
2086
2087 /**
2088 * d_lookup - search for a dentry
2089 * @parent: parent dentry
2090 * @name: qstr of name we wish to find
2091 * Returns: dentry, or NULL
2092 *
2093 * d_lookup searches the children of the parent dentry for the name in
2094 * question. If the dentry is found, its reference count is incremented and the
2095 * dentry is returned. The caller must use dput to free the entry when it has
2096 * finished using it. %NULL is returned if the dentry does not exist.
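 *
 * Example (hypothetical caller; @name must already carry a valid hash,
 * e.g. from full_name_hash(), as d_hash_and_lookup() below arranges):
 *
 *	struct dentry *child = d_lookup(parent, name);
 *
 *	if (child) {
 *		...
 *		dput(child);
 *	}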
2097 */
2098 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2099 {
2100 	struct dentry *dentry;
2101 	unsigned seq;
2102
2103 	do {
2104 		seq = read_seqbegin(&rename_lock);
2105 		dentry = __d_lookup(parent, name);
2106 		if (dentry)
2107 			break;
2108 	} while (read_seqretry(&rename_lock, seq));
2109 	return dentry;
2110 }
2111 EXPORT_SYMBOL(d_lookup);
2112
2113 /**
2114 * __d_lookup - search for a dentry (racy)
2115 * @parent: parent dentry
2116 * @name: qstr of name we wish to find
2117 * Returns: dentry, or NULL
2118 *
2119 * __d_lookup is like d_lookup; however, it may (rarely) return a
2120 * false-negative result due to unrelated rename activity.
2121 *
2122 * __d_lookup is slightly faster because it avoids the rename_lock read
2123 * seqlock; however, it must be used carefully, e.g. with a following
2124 * d_lookup in the case of failure.
2125 *
2126 * __d_lookup callers must be commented.
2127 */
2128 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2129 {
2130 	unsigned int len = name->len;
2131 	unsigned int hash = name->hash;
2132 	const unsigned char *str = name->name;
2133 	struct hlist_bl_head *b = d_hash(parent, hash);
2134 	struct hlist_bl_node *node;
2135 	struct dentry *found = NULL;
2136 	struct dentry *dentry;
2137
2138 	/*
2139 	 * Note: There is significant duplication with __d_lookup_rcu which is
2140 	 * required to prevent single-threaded performance regressions
2141 	 * especially on architectures where smp_rmb (in seqcounts) is costly.
2142 	 * Keep the two functions in sync.
2143 	 */
2144
2145 	/*
2146 	 * The hash list is protected using RCU.
2147 	 *
2148 	 * Take d_lock when comparing a candidate dentry, to avoid races
2149 	 * with d_move().
2150 	 *
2151 	 * It is possible that concurrent renames can mess up our list
2152 	 * walk here and make us miss our dentry, giving a false-negative
2153 	 * result. d_lookup() protects against concurrent renames using the
2154 	 * rename_lock seqlock.
2155 	 *
2156 	 * See Documentation/filesystems/path-lookup.txt for more details.
2157 	 */
2158 	rcu_read_lock();
2159
2160 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2161
2162 		if (dentry->d_name.hash != hash)
2163 			continue;
2164
2165 		spin_lock(&dentry->d_lock);
2166 		if (dentry->d_parent != parent)
2167 			goto next;
2168 		if (d_unhashed(dentry))
2169 			goto next;
2170
2171 		/*
2172 		 * It is safe to compare names since d_move() cannot
2173 		 * change the qstr (protected by d_lock).
2174 		 */
2175 		if (parent->d_flags & DCACHE_OP_COMPARE) {
2176 			int tlen = dentry->d_name.len;
2177 			const char *tname = dentry->d_name.name;
2178 			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2179 				goto next;
2180 		} else {
2181 			if (dentry->d_name.len != len)
2182 				goto next;
2183 			if (dentry_cmp(dentry, str, len))
2184 				goto next;
2185 		}
2186
2187 		dentry->d_lockref.count++;
2188 		found = dentry;
2189 		spin_unlock(&dentry->d_lock);
2190 		break;
2191 next:
2192 		spin_unlock(&dentry->d_lock);
2193 	}
2194 	rcu_read_unlock();
2195
2196 	return found;
2197 }
2198
2199 /**
2200 * d_hash_and_lookup - hash the qstr then search for a dentry
2201 * @dir: Directory to search in
2202 * @name: qstr of name we wish to find
2203 *
2204 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error) is returned.
2205 */
2206 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2207 {
2208 	/*
2209 	 * Check for a fs-specific hash function. Note that we must
2210 	 * calculate the standard hash first, as the d_op->d_hash()
2211 	 * routine may choose to leave the hash value unchanged.
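	 *
	 * For illustration, a hypothetical case-folding filesystem ("foofs",
	 * not a real in-tree implementation) might supply a ->d_hash()
	 * along these lines:
	 *
	 *	static int foofs_d_hash(const struct dentry *dir, struct qstr *q)
	 *	{
	 *		unsigned long hash = init_name_hash();
	 *		unsigned int i;
	 *
	 *		for (i = 0; i < q->len; i++)
	 *			hash = partial_name_hash(tolower(q->name[i]), hash);
	 *		q->hash = end_name_hash(hash);
	 *		return 0;
	 *	}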
2212 	 */
2213 	name->hash = full_name_hash(name->name, name->len);
2214 	if (dir->d_flags & DCACHE_OP_HASH) {
2215 		int err = dir->d_op->d_hash(dir, name);
2216 		if (unlikely(err < 0))
2217 			return ERR_PTR(err);
2218 	}
2219 	return d_lookup(dir, name);
2220 }
2221 EXPORT_SYMBOL(d_hash_and_lookup);
2222
2223 /**
2224 * d_validate - verify dentry provided from insecure source (deprecated)
2225 * @dentry: The dentry alleged to be a valid child of @dparent
2226 * @dparent: The parent dentry (known to be valid)
2227 *
2228 * An insecure source has sent us a dentry; here we verify it and dget() it.
2229 * This is used by ncpfs in its readdir implementation.
2230 * Zero is returned if the dentry is invalid.
2231 *
2232 * This function is slow for big directories and is deprecated; do not use it.
2233 */
2234 int d_validate(struct dentry *dentry, struct dentry *dparent)
2235 {
2236 	struct dentry *child;
2237
2238 	spin_lock(&dparent->d_lock);
2239 	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2240 		if (dentry == child) {
2241 			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2242 			__dget_dlock(dentry);
2243 			spin_unlock(&dentry->d_lock);
2244 			spin_unlock(&dparent->d_lock);
2245 			return 1;
2246 		}
2247 	}
2248 	spin_unlock(&dparent->d_lock);
2249
2250 	return 0;
2251 }
2252 EXPORT_SYMBOL(d_validate);
2253
2254 /*
2255 * When a file is deleted, we have two options:
2256 * - turn this dentry into a negative dentry
2257 * - unhash this dentry and free it.
2258 *
2259 * Usually, we want to just turn this into
2260 * a negative dentry, but if anybody else is
2261 * currently using the dentry or the inode
2262 * we can't do that and we fall back on removing
2263 * it from the hash queues and waiting for
2264 * it to be deleted later when it has no users.
2265 */
2266
2267 /**
2268 * d_delete - delete a dentry
2269 * @dentry: The dentry to delete
2270 *
2271 * Turn the dentry into a negative dentry if possible, otherwise
2272 * remove it from the hash queues so it can be deleted later.
2273 */
2274
2275 void d_delete(struct dentry *dentry)
2276 {
2277 	struct inode *inode;
2278 	int isdir = 0;
2279 	/*
2280 	 * Are we the only user?
2281 	 */
2282 again:
2283 	spin_lock(&dentry->d_lock);
2284 	inode = dentry->d_inode;
2285 	isdir = S_ISDIR(inode->i_mode);
2286 	if (dentry->d_lockref.count == 1) {
2287 		if (!spin_trylock(&inode->i_lock)) {
2288 			spin_unlock(&dentry->d_lock);
2289 			cpu_relax();
2290 			goto again;
2291 		}
2292 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2293 		dentry_unlink_inode(dentry);
2294 		fsnotify_nameremove(dentry, isdir);
2295 		return;
2296 	}
2297
2298 	if (!d_unhashed(dentry))
2299 		__d_drop(dentry);
2300
2301 	spin_unlock(&dentry->d_lock);
2302
2303 	fsnotify_nameremove(dentry, isdir);
2304 }
2305 EXPORT_SYMBOL(d_delete);
2306
2307 static void __d_rehash(struct dentry *entry, struct hlist_bl_head *b)
2308 {
2309 	BUG_ON(!d_unhashed(entry));
2310 	hlist_bl_lock(b);
2311 	entry->d_flags |= DCACHE_RCUACCESS;
2312 	hlist_bl_add_head_rcu(&entry->d_hash, b);
2313 	hlist_bl_unlock(b);
2314 }
2315
2316 static void _d_rehash(struct dentry *entry)
2317 {
2318 	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2319 }
2320
2321 /**
2322 * d_rehash - add an entry back to the hash
2323 * @entry: dentry to add to the hash
2324 *
2325 * Adds a dentry to the hash according to its name.
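 *
 * Illustrative pairing (hypothetical sequence): an entry taken out of
 * the hash with d_drop() may be put back once it is known to be valid
 * again:
 *
 *	d_drop(dentry);
 *	...
 *	d_rehash(dentry);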
2326 */
2327
2328 void d_rehash(struct dentry *entry)
2329 {
2330 	spin_lock(&entry->d_lock);
2331 	_d_rehash(entry);
2332 	spin_unlock(&entry->d_lock);
2333 }
2334 EXPORT_SYMBOL(d_rehash);
2335
2336 /**
2337 * dentry_update_name_case - update a case-insensitive dentry with a new name
2338 * @dentry: dentry to be updated
2339 * @name: new name
2340 *
2341 * Update a case-insensitive dentry with the new case of the name.
2342 *
2343 * dentry must have been returned by d_lookup with name @name. Old and new
2344 * name lengths must match (i.e. no d_compare that allows mismatched name
2345 * lengths).
2346 *
2347 * The parent inode's i_mutex must be held over d_lookup and into this call
2348 * (to keep renames and concurrent inserts, and readdir(2) away).
2349 */
2350 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2351 {
2352 	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2353 	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2354
2355 	spin_lock(&dentry->d_lock);
2356 	write_seqcount_begin(&dentry->d_seq);
2357 	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2358 	write_seqcount_end(&dentry->d_seq);
2359 	spin_unlock(&dentry->d_lock);
2360 }
2361 EXPORT_SYMBOL(dentry_update_name_case);
2362
2363 static void swap_names(struct dentry *dentry, struct dentry *target)
2364 {
2365 	if (unlikely(dname_external(target))) {
2366 		if (unlikely(dname_external(dentry))) {
2367 			/*
2368 			 * Both external: swap the pointers.
2369 			 */
2370 			swap(target->d_name.name, dentry->d_name.name);
2371 		} else {
2372 			/*
2373 			 * dentry:internal, target:external. Steal target's
2374 			 * storage and make target internal.
2375 			 */
2376 			memcpy(target->d_iname, dentry->d_name.name,
2377 					dentry->d_name.len + 1);
2378 			dentry->d_name.name = target->d_name.name;
2379 			target->d_name.name = target->d_iname;
2380 		}
2381 	} else {
2382 		if (unlikely(dname_external(dentry))) {
2383 			/*
2384 			 * dentry:external, target:internal. Give dentry's
2385 			 * storage to target and make dentry internal.
2386 			 */
2387 			memcpy(dentry->d_iname, target->d_name.name,
2388 					target->d_name.len + 1);
2389 			target->d_name.name = dentry->d_name.name;
2390 			dentry->d_name.name = dentry->d_iname;
2391 		} else {
2392 			/*
2393 			 * Both are internal.
2394 			 */
2395 			unsigned int i;
2396 			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2397 			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2398 				swap(((long *) &dentry->d_iname)[i],
2399 				     ((long *) &target->d_iname)[i]);
2400 			}
2401 		}
2402 	}
2403 	swap(dentry->d_name.hash_len, target->d_name.hash_len);
2404 }
2405
2406 static void copy_name(struct dentry *dentry, struct dentry *target)
2407 {
2408 	struct external_name *old_name = NULL;
2409 	if (unlikely(dname_external(dentry)))
2410 		old_name = external_name(dentry);
2411 	if (unlikely(dname_external(target))) {
2412 		atomic_inc(&external_name(target)->u.count);
2413 		dentry->d_name = target->d_name;
2414 	} else {
2415 		memcpy(dentry->d_iname, target->d_name.name,
2416 				target->d_name.len + 1);
2417 		dentry->d_name.name = dentry->d_iname;
2418 		dentry->d_name.hash_len = target->d_name.hash_len;
2419 	}
2420 	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2421 		kfree_rcu(old_name, u.head);
2422 }
2423
2424 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2425 {
2426 	/*
2427 	 * XXXX: do we really need to take target->d_lock?
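	 *
	 * The ordering used below: the parent locks are taken first (the
	 * ancestor before its descendant when one parent is an ancestor of
	 * the other), then the two dentries' own locks, lower address first.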
2428 	 */
2429 	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2430 		spin_lock(&target->d_parent->d_lock);
2431 	else {
2432 		if (d_ancestor(dentry->d_parent, target->d_parent)) {
2433 			spin_lock(&dentry->d_parent->d_lock);
2434 			spin_lock_nested(&target->d_parent->d_lock,
2435 						DENTRY_D_LOCK_NESTED);
2436 		} else {
2437 			spin_lock(&target->d_parent->d_lock);
2438 			spin_lock_nested(&dentry->d_parent->d_lock,
2439 						DENTRY_D_LOCK_NESTED);
2440 		}
2441 	}
2442 	if (target < dentry) {
2443 		spin_lock_nested(&target->d_lock, 2);
2444 		spin_lock_nested(&dentry->d_lock, 3);
2445 	} else {
2446 		spin_lock_nested(&dentry->d_lock, 2);
2447 		spin_lock_nested(&target->d_lock, 3);
2448 	}
2449 }
2450
2451 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2452 {
2453 	if (target->d_parent != dentry->d_parent)
2454 		spin_unlock(&dentry->d_parent->d_lock);
2455 	if (target->d_parent != target)
2456 		spin_unlock(&target->d_parent->d_lock);
2457 	spin_unlock(&target->d_lock);
2458 	spin_unlock(&dentry->d_lock);
2459 }
2460
2461 /*
2462 * When switching names, the actual string doesn't strictly have to
2463 * be preserved in the target - because we're dropping the target
2464 * anyway. As such, we can just do a simple memcpy() to copy over
2465 * the new name before we switch, unless we are going to rehash
2466 * it. Note that if we *do* unhash the target, we are not allowed
2467 * to rehash it without giving it a new name/hash key - whether
2468 * we swap or overwrite the names here, the resulting name won't match
2469 * the reality in the filesystem; it's only there for d_path() purposes.
2470 * Note that all of this is happening under rename_lock, so
2471 * any hash lookup seeing it in the middle of manipulations will
2472 * be discarded anyway. So we do not care what happens to the hash
2473 * key in that case.
2474 */
2475 /*
2476 * __d_move - move a dentry
2477 * @dentry: entry to move
2478 * @target: new dentry
2479 * @exchange: exchange the two dentries
2480 *
2481 * Update the dcache to reflect the move of a file name. Negative
2482 * dcache entries should not be moved in this way. Caller must hold
2483 * rename_lock, the i_mutex of the source and target directories,
2484 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2485 */
2486 static void __d_move(struct dentry *dentry, struct dentry *target,
2487 		     bool exchange)
2488 {
2489 	if (!dentry->d_inode)
2490 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2491
2492 	BUG_ON(d_ancestor(dentry, target));
2493 	BUG_ON(d_ancestor(target, dentry));
2494
2495 	dentry_lock_for_move(dentry, target);
2496
2497 	write_seqcount_begin(&dentry->d_seq);
2498 	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2499
2500 	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2501
2502 	/*
2503 	 * Move the dentry to the target hash queue. Don't bother checking
2504 	 * for the same hash queue because of how unlikely it is.
2505 	 */
2506 	__d_drop(dentry);
2507 	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2508
2509 	/*
2510 	 * Unhash the target (d_delete() is not usable here). If exchanging
2511 	 * the two dentries, then rehash onto the other's hash queue.
2512 	 */
2513 	__d_drop(target);
2514 	if (exchange) {
2515 		__d_rehash(target,
2516 			   d_hash(dentry->d_parent, dentry->d_name.hash));
2517 	}
2518
2519 	/* Switch the names.. */
2520 	if (exchange)
2521 		swap_names(dentry, target);
2522 	else
2523 		copy_name(dentry, target);
2524
2525 	/* ...
and switch them in the tree */ 2526 if (IS_ROOT(dentry)) { 2527 /* splicing a tree */ 2528 dentry->d_parent = target->d_parent; 2529 target->d_parent = target; 2530 list_del_init(&target->d_u.d_child); 2531 list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2532 } else { 2533 /* swapping two dentries */ 2534 swap(dentry->d_parent, target->d_parent); 2535 list_move(&target->d_u.d_child, &target->d_parent->d_subdirs); 2536 list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2537 if (exchange) 2538 fsnotify_d_move(target); 2539 fsnotify_d_move(dentry); 2540 } 2541 2542 write_seqcount_end(&target->d_seq); 2543 write_seqcount_end(&dentry->d_seq); 2544 2545 dentry_unlock_for_move(dentry, target); 2546 } 2547 2548 /* 2549 * d_move - move a dentry 2550 * @dentry: entry to move 2551 * @target: new dentry 2552 * 2553 * Update the dcache to reflect the move of a file name. Negative 2554 * dcache entries should not be moved in this way. See the locking 2555 * requirements for __d_move. 2556 */ 2557 void d_move(struct dentry *dentry, struct dentry *target) 2558 { 2559 write_seqlock(&rename_lock); 2560 __d_move(dentry, target, false); 2561 write_sequnlock(&rename_lock); 2562 } 2563 EXPORT_SYMBOL(d_move); 2564 2565 /* 2566 * d_exchange - exchange two dentries 2567 * @dentry1: first dentry 2568 * @dentry2: second dentry 2569 */ 2570 void d_exchange(struct dentry *dentry1, struct dentry *dentry2) 2571 { 2572 write_seqlock(&rename_lock); 2573 2574 WARN_ON(!dentry1->d_inode); 2575 WARN_ON(!dentry2->d_inode); 2576 WARN_ON(IS_ROOT(dentry1)); 2577 WARN_ON(IS_ROOT(dentry2)); 2578 2579 __d_move(dentry1, dentry2, true); 2580 2581 write_sequnlock(&rename_lock); 2582 } 2583 2584 /** 2585 * d_ancestor - search for an ancestor 2586 * @p1: ancestor dentry 2587 * @p2: child dentry 2588 * 2589 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2590 * an ancestor of p2, else NULL. 2591 */ 2592 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2593 { 2594 struct dentry *p; 2595 2596 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2597 if (p->d_parent == p1) 2598 return p; 2599 } 2600 return NULL; 2601 } 2602 2603 /* 2604 * This helper attempts to cope with remotely renamed directories 2605 * 2606 * It assumes that the caller is already holding 2607 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2608 * 2609 * Note: If ever the locking in lock_rename() changes, then please 2610 * remember to update this too... 
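 *
 * Specifically, the trylock sequence below mirrors lock_rename():
 * s_vfs_rename_mutex first, then the i_mutex of the alias's parent.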
2611 */
2612 static struct dentry *__d_unalias(struct inode *inode,
2613 		struct dentry *dentry, struct dentry *alias)
2614 {
2615 	struct mutex *m1 = NULL, *m2 = NULL;
2616 	struct dentry *ret = ERR_PTR(-EBUSY);
2617
2618 	/* If alias and dentry share a parent, then no extra locks required */
2619 	if (alias->d_parent == dentry->d_parent)
2620 		goto out_unalias;
2621
2622 	/* See lock_rename() */
2623 	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2624 		goto out_err;
2625 	m1 = &dentry->d_sb->s_vfs_rename_mutex;
2626 	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2627 		goto out_err;
2628 	m2 = &alias->d_parent->d_inode->i_mutex;
2629 out_unalias:
2630 	__d_move(alias, dentry, false);
2631 	ret = alias;
2632 out_err:
2633 	spin_unlock(&inode->i_lock);
2634 	if (m2)
2635 		mutex_unlock(m2);
2636 	if (m1)
2637 		mutex_unlock(m1);
2638 	return ret;
2639 }
2640
2641 /**
2642 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2643 * @inode: the inode which may have a disconnected dentry
2644 * @dentry: a negative dentry which we want to point to the inode.
2645 *
2646 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2647 * place of the given dentry and return it, else simply d_add the inode
2648 * to the dentry and return NULL.
2649 *
2650 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2651 * we should error out: directories can't have multiple aliases.
2652 *
2653 * This is needed in the lookup routine of any filesystem that is exportable
2654 * (via knfsd) so that we can build dcache paths to directories effectively.
2655 *
2656 * If a dentry was found and moved, then it is returned. Otherwise NULL
2657 * is returned. This matches the expected return value of ->lookup.
2658 *
2659 * Cluster filesystems may call this function with a negative, hashed dentry.
2660 * In that case, we know that the inode will be a regular file, and also this
2661 * will only occur during atomic_open. So we need to check for the dentry
2662 * being already hashed only in the final case.
2663 */
2664 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2665 {
2666 	struct dentry *new = NULL;
2667
2668 	if (IS_ERR(inode))
2669 		return ERR_CAST(inode);
2670
2671 	if (inode && S_ISDIR(inode->i_mode)) {
2672 		spin_lock(&inode->i_lock);
2673 		new = __d_find_any_alias(inode);
2674 		if (new) {
2675 			if (!IS_ROOT(new)) {
2676 				spin_unlock(&inode->i_lock);
2677 				dput(new);
2678 				return ERR_PTR(-EIO);
2679 			}
2680 			if (d_ancestor(new, dentry)) {
2681 				spin_unlock(&inode->i_lock);
2682 				dput(new);
2683 				return ERR_PTR(-EIO);
2684 			}
2685 			write_seqlock(&rename_lock);
2686 			__d_move(new, dentry, false);
2687 			write_sequnlock(&rename_lock);
2688 			spin_unlock(&inode->i_lock);
2689 			security_d_instantiate(new, inode);
2690 			iput(inode);
2691 		} else {
2692 			/* already holding inode->i_lock, so d_add() by hand */
2693 			__d_instantiate(dentry, inode);
2694 			spin_unlock(&inode->i_lock);
2695 			security_d_instantiate(dentry, inode);
2696 			d_rehash(dentry);
2697 		}
2698 	} else {
2699 		d_instantiate(dentry, inode);
2700 		if (d_unhashed(dentry))
2701 			d_rehash(dentry);
2702 	}
2703 	return new;
2704 }
2705 EXPORT_SYMBOL(d_splice_alias);
2706
2707 /**
2708 * d_materialise_unique - introduce an inode into the tree
2709 * @dentry: candidate dentry
2710 * @inode: inode to bind to the dentry, to which aliases may be attached
2711 *
2712 * Introduces a dentry into the tree, substituting an extant disconnected
2713 * root directory alias in its place if there is one. Caller must hold the
2714 * i_mutex of the parent directory.
2715 */
2716 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2717 {
2718 	struct dentry *actual;
2719
2720 	BUG_ON(!d_unhashed(dentry));
2721
2722 	if (!inode) {
2723 		actual = dentry;
2724 		__d_instantiate(dentry, NULL);
2725 		d_rehash(actual);
2726 		goto out_nolock;
2727 	}
2728
2729 	spin_lock(&inode->i_lock);
2730
2731 	if (S_ISDIR(inode->i_mode)) {
2732 		struct dentry *alias;
2733
2734 		/* Does an aliased dentry already exist? */
2735 		alias = __d_find_alias(inode);
2736 		if (alias) {
2737 			actual = alias;
2738 			write_seqlock(&rename_lock);
2739
2740 			if (d_ancestor(alias, dentry)) {
2741 				/* Check for loops */
2742 				actual = ERR_PTR(-ELOOP);
2743 				spin_unlock(&inode->i_lock);
2744 			} else if (IS_ROOT(alias)) {
2745 				/* Is this an anonymous mountpoint that we
2746 				 * could splice into our tree? */
2747 				__d_move(alias, dentry, false);
2748 				write_sequnlock(&rename_lock);
2749 				goto found;
2750 			} else {
2751 				/* Nope, but we must(!) avoid directory
2752 				 * aliasing. This drops inode->i_lock */
2753 				actual = __d_unalias(inode, dentry, alias);
2754 			}
2755 			write_sequnlock(&rename_lock);
2756 			if (IS_ERR(actual)) {
2757 				if (PTR_ERR(actual) == -ELOOP)
2758 					pr_warn_ratelimited(
2759 						"VFS: Lookup of '%s' in %s %s"
2760 						" would have caused loop\n",
2761 						dentry->d_name.name,
2762 						inode->i_sb->s_type->name,
2763 						inode->i_sb->s_id);
2764 				dput(alias);
2765 			}
2766 			goto out_nolock;
2767 		}
2768 	}
2769
2770 	/* Add a unique reference */
2771 	actual = __d_instantiate_unique(dentry, inode);
2772 	if (!actual)
2773 		actual = dentry;
2774
2775 	d_rehash(actual);
2776 found:
2777 	spin_unlock(&inode->i_lock);
2778 out_nolock:
2779 	if (actual == dentry) {
2780 		security_d_instantiate(dentry, inode);
2781 		return NULL;
2782 	}
2783
2784 	iput(inode);
2785 	return actual;
2786 }
2787 EXPORT_SYMBOL_GPL(d_materialise_unique);
2788
2789 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2790 {
2791 	*buflen -= namelen;
2792 	if (*buflen < 0)
2793 		return -ENAMETOOLONG;
2794 	*buffer -= namelen;
2795 	memcpy(*buffer, str, namelen);
2796 	return 0;
2797 }
2798
2799 /**
2800 * prepend_name - prepend a pathname in front of the current buffer pointer
2801 * @buffer: buffer pointer
2802 * @buflen: allocated length of the buffer
2803 * @name: name string and length qstr structure
2804 *
2805 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2806 * make sure that either the old or the new name pointer and length are
2807 * fetched. However, there may be a mismatch between length and pointer.
2808 * The length cannot be trusted; we need to copy the name byte by byte until
2809 * the length is reached or a NUL byte is found. It also prepends "/" at
2810 * the beginning of the name. The sequence number check at the caller will
2811 * retry when a d_move() does happen. So any garbage in the buffer
2812 * due to a mismatched pointer and length will be discarded.
2813 *
2814 * A data dependency barrier is needed to make sure that we see the
2815 * terminating NUL. Alpha strikes again, film at 11...
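 *
 * Worked example (hypothetical values): if the tail of the buffer already
 * holds "/baz\0" and we prepend the qstr "bar", the buffer pointer moves
 * back by len + 1 = 4 bytes and the tail then reads "/bar/baz\0".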
2816 */
2817 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2818 {
2819 	const char *dname = ACCESS_ONCE(name->name);
2820 	u32 dlen = ACCESS_ONCE(name->len);
2821 	char *p;
2822
2823 	smp_read_barrier_depends();
2824
2825 	*buflen -= dlen + 1;
2826 	if (*buflen < 0)
2827 		return -ENAMETOOLONG;
2828 	p = *buffer -= dlen + 1;
2829 	*p++ = '/';
2830 	while (dlen--) {
2831 		char c = *dname++;
2832 		if (!c)
2833 			break;
2834 		*p++ = c;
2835 	}
2836 	return 0;
2837 }
2838
2839 /**
2840 * prepend_path - Prepend path string to a buffer
2841 * @path: the dentry/vfsmount to report
2842 * @root: root vfsmnt/dentry
2843 * @buffer: pointer to the end of the buffer
2844 * @buflen: pointer to buffer length
2845 *
2846 * The function will first try to write out the pathname without taking any
2847 * lock other than the RCU read lock to make sure that dentries won't go away.
2848 * It only checks the sequence number of the global rename_lock as any change
2849 * in the dentry's d_seq will be preceded by changes in the rename_lock
2850 * sequence number. If the sequence number has changed, it restarts the
2851 * whole pathname back-tracing sequence by taking the rename_lock.
2852 * In this case, there is no need to take the RCU read lock as the recursive
2853 * parent pointer references will keep the dentry chain alive as long as no
2854 * rename operation is performed.
2855 */
2856 static int prepend_path(const struct path *path,
2857 			const struct path *root,
2858 			char **buffer, int *buflen)
2859 {
2860 	struct dentry *dentry;
2861 	struct vfsmount *vfsmnt;
2862 	struct mount *mnt;
2863 	int error = 0;
2864 	unsigned seq, m_seq = 0;
2865 	char *bptr;
2866 	int blen;
2867
2868 	rcu_read_lock();
2869 restart_mnt:
2870 	read_seqbegin_or_lock(&mount_lock, &m_seq);
2871 	seq = 0;
2872 	rcu_read_lock();
2873 restart:
2874 	bptr = *buffer;
2875 	blen = *buflen;
2876 	error = 0;
2877 	dentry = path->dentry;
2878 	vfsmnt = path->mnt;
2879 	mnt = real_mount(vfsmnt);
2880 	read_seqbegin_or_lock(&rename_lock, &seq);
2881 	while (dentry != root->dentry || vfsmnt != root->mnt) {
2882 		struct dentry *parent;
2883
2884 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2885 			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2886 			/* Global root? */
2887 			if (mnt != parent) {
2888 				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2889 				mnt = parent;
2890 				vfsmnt = &mnt->mnt;
2891 				continue;
2892 			}
2893 			/*
2894 			 * Filesystems needing to implement special "root names"
2895 			 * should do so with ->d_dname().
2896 			 */
2897 			if (IS_ROOT(dentry) &&
2898 			    (dentry->d_name.len != 1 ||
2899 			     dentry->d_name.name[0] != '/')) {
2900 				WARN(1, "Root dentry has weird name <%.*s>\n",
2901 				     (int) dentry->d_name.len,
2902 				     dentry->d_name.name);
2903 			}
2904 			if (!error)
2905 				error = is_mounted(vfsmnt) ?
1 : 2; 2906 break; 2907 } 2908 parent = dentry->d_parent; 2909 prefetch(parent); 2910 error = prepend_name(&bptr, &blen, &dentry->d_name); 2911 if (error) 2912 break; 2913 2914 dentry = parent; 2915 } 2916 if (!(seq & 1)) 2917 rcu_read_unlock(); 2918 if (need_seqretry(&rename_lock, seq)) { 2919 seq = 1; 2920 goto restart; 2921 } 2922 done_seqretry(&rename_lock, seq); 2923 2924 if (!(m_seq & 1)) 2925 rcu_read_unlock(); 2926 if (need_seqretry(&mount_lock, m_seq)) { 2927 m_seq = 1; 2928 goto restart_mnt; 2929 } 2930 done_seqretry(&mount_lock, m_seq); 2931 2932 if (error >= 0 && bptr == *buffer) { 2933 if (--blen < 0) 2934 error = -ENAMETOOLONG; 2935 else 2936 *--bptr = '/'; 2937 } 2938 *buffer = bptr; 2939 *buflen = blen; 2940 return error; 2941 } 2942 2943 /** 2944 * __d_path - return the path of a dentry 2945 * @path: the dentry/vfsmount to report 2946 * @root: root vfsmnt/dentry 2947 * @buf: buffer to return value in 2948 * @buflen: buffer length 2949 * 2950 * Convert a dentry into an ASCII path name. 2951 * 2952 * Returns a pointer into the buffer or an error code if the 2953 * path was too long. 2954 * 2955 * "buflen" should be positive. 2956 * 2957 * If the path is not reachable from the supplied root, return %NULL. 2958 */ 2959 char *__d_path(const struct path *path, 2960 const struct path *root, 2961 char *buf, int buflen) 2962 { 2963 char *res = buf + buflen; 2964 int error; 2965 2966 prepend(&res, &buflen, "\0", 1); 2967 error = prepend_path(path, root, &res, &buflen); 2968 2969 if (error < 0) 2970 return ERR_PTR(error); 2971 if (error > 0) 2972 return NULL; 2973 return res; 2974 } 2975 2976 char *d_absolute_path(const struct path *path, 2977 char *buf, int buflen) 2978 { 2979 struct path root = {}; 2980 char *res = buf + buflen; 2981 int error; 2982 2983 prepend(&res, &buflen, "\0", 1); 2984 error = prepend_path(path, &root, &res, &buflen); 2985 2986 if (error > 1) 2987 error = -EINVAL; 2988 if (error < 0) 2989 return ERR_PTR(error); 2990 return res; 2991 } 2992 2993 /* 2994 * same as __d_path but appends "(deleted)" for unlinked files. 2995 */ 2996 static int path_with_deleted(const struct path *path, 2997 const struct path *root, 2998 char **buf, int *buflen) 2999 { 3000 prepend(buf, buflen, "\0", 1); 3001 if (d_unlinked(path->dentry)) { 3002 int error = prepend(buf, buflen, " (deleted)", 10); 3003 if (error) 3004 return error; 3005 } 3006 3007 return prepend_path(path, root, buf, buflen); 3008 } 3009 3010 static int prepend_unreachable(char **buffer, int *buflen) 3011 { 3012 return prepend(buffer, buflen, "(unreachable)", 13); 3013 } 3014 3015 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) 3016 { 3017 unsigned seq; 3018 3019 do { 3020 seq = read_seqcount_begin(&fs->seq); 3021 *root = fs->root; 3022 } while (read_seqcount_retry(&fs->seq, seq)); 3023 } 3024 3025 /** 3026 * d_path - return the path of a dentry 3027 * @path: path to report 3028 * @buf: buffer to return value in 3029 * @buflen: buffer length 3030 * 3031 * Convert a dentry into an ASCII path name. If the entry has been deleted 3032 * the string " (deleted)" is appended. Note that this is ambiguous. 3033 * 3034 * Returns a pointer into the buffer or an error code if the path was 3035 * too long. Note: Callers should use the returned pointer, not the passed 3036 * in buffer, to use the name! The implementation often starts at an offset 3037 * into the buffer, and may leave 0 bytes at the start. 3038 * 3039 * "buflen" should be positive. 
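 *
 * Example (hypothetical caller; note that the returned pointer, not
 * @buf, is what must be used):
 *
 *	char *tmp = (char *)__get_free_page(GFP_KERNEL);
 *	char *pathname;
 *
 *	if (tmp) {
 *		pathname = d_path(&file->f_path, tmp, PAGE_SIZE);
 *		if (!IS_ERR(pathname))
 *			pr_info("%s\n", pathname);
 *		free_page((unsigned long)tmp);
 *	}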
3040 */ 3041 char *d_path(const struct path *path, char *buf, int buflen) 3042 { 3043 char *res = buf + buflen; 3044 struct path root; 3045 int error; 3046 3047 /* 3048 * We have various synthetic filesystems that never get mounted. On 3049 * these filesystems dentries are never used for lookup purposes, and 3050 * thus don't need to be hashed. They also don't need a name until a 3051 * user wants to identify the object in /proc/pid/fd/. The little hack 3052 * below allows us to generate a name for these objects on demand: 3053 * 3054 * Some pseudo inodes are mountable. When they are mounted 3055 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname 3056 * and instead have d_path return the mounted path. 3057 */ 3058 if (path->dentry->d_op && path->dentry->d_op->d_dname && 3059 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) 3060 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 3061 3062 rcu_read_lock(); 3063 get_fs_root_rcu(current->fs, &root); 3064 error = path_with_deleted(path, &root, &res, &buflen); 3065 rcu_read_unlock(); 3066 3067 if (error < 0) 3068 res = ERR_PTR(error); 3069 return res; 3070 } 3071 EXPORT_SYMBOL(d_path); 3072 3073 /* 3074 * Helper function for dentry_operations.d_dname() members 3075 */ 3076 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 3077 const char *fmt, ...) 3078 { 3079 va_list args; 3080 char temp[64]; 3081 int sz; 3082 3083 va_start(args, fmt); 3084 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 3085 va_end(args); 3086 3087 if (sz > sizeof(temp) || sz > buflen) 3088 return ERR_PTR(-ENAMETOOLONG); 3089 3090 buffer += buflen - sz; 3091 return memcpy(buffer, temp, sz); 3092 } 3093 3094 char *simple_dname(struct dentry *dentry, char *buffer, int buflen) 3095 { 3096 char *end = buffer + buflen; 3097 /* these dentries are never renamed, so d_lock is not needed */ 3098 if (prepend(&end, &buflen, " (deleted)", 11) || 3099 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) || 3100 prepend(&end, &buflen, "/", 1)) 3101 end = ERR_PTR(-ENAMETOOLONG); 3102 return end; 3103 } 3104 EXPORT_SYMBOL(simple_dname); 3105 3106 /* 3107 * Write full pathname from the root of the filesystem into the buffer. 
3108 */
3109 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3110 {
3111 	struct dentry *dentry;
3112 	char *end, *retval;
3113 	int len, seq = 0;
3114 	int error = 0;
3115
3116 	if (buflen < 2)
3117 		goto Elong;
3118
3119 	rcu_read_lock();
3120 restart:
3121 	dentry = d;
3122 	end = buf + buflen;
3123 	len = buflen;
3124 	prepend(&end, &len, "\0", 1);
3125 	/* Get '/' right */
3126 	retval = end - 1;
3127 	*retval = '/';
3128 	read_seqbegin_or_lock(&rename_lock, &seq);
3129 	while (!IS_ROOT(dentry)) {
3130 		struct dentry *parent = dentry->d_parent;
3131
3132 		prefetch(parent);
3133 		error = prepend_name(&end, &len, &dentry->d_name);
3134 		if (error)
3135 			break;
3136
3137 		retval = end;
3138 		dentry = parent;
3139 	}
3140 	if (!(seq & 1))
3141 		rcu_read_unlock();
3142 	if (need_seqretry(&rename_lock, seq)) {
3143 		seq = 1;
3144 		goto restart;
3145 	}
3146 	done_seqretry(&rename_lock, seq);
3147 	if (error)
3148 		goto Elong;
3149 	return retval;
3150 Elong:
3151 	return ERR_PTR(-ENAMETOOLONG);
3152 }
3153
3154 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3155 {
3156 	return __dentry_path(dentry, buf, buflen);
3157 }
3158 EXPORT_SYMBOL(dentry_path_raw);
3159
3160 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3161 {
3162 	char *p = NULL;
3163 	char *retval;
3164
3165 	if (d_unlinked(dentry)) {
3166 		p = buf + buflen;
3167 		if (prepend(&p, &buflen, "//deleted", 10) != 0)
3168 			goto Elong;
3169 		buflen++;
3170 	}
3171 	retval = __dentry_path(dentry, buf, buflen);
3172 	if (!IS_ERR(retval) && p)
3173 		*p = '/';	/* restore '/' overridden with '\0' */
3174 	return retval;
3175 Elong:
3176 	return ERR_PTR(-ENAMETOOLONG);
3177 }
3178
3179 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3180 				    struct path *pwd)
3181 {
3182 	unsigned seq;
3183
3184 	do {
3185 		seq = read_seqcount_begin(&fs->seq);
3186 		*root = fs->root;
3187 		*pwd = fs->pwd;
3188 	} while (read_seqcount_retry(&fs->seq, seq));
3189 }
3190
3191 /*
3192 * NOTE! The user-level library version returns a
3193 * character pointer. The kernel system call just
3194 * returns the length of the buffer filled (which
3195 * includes the ending '\0' character), or a negative
3196 * error value. So libc would do something like
3197 *
3198 *	char *getcwd(char *buf, size_t size)
3199 *	{
3200 *		int retval;
3201 *
3202 *		retval = sys_getcwd(buf, size);
3203 *		if (retval >= 0)
3204 *			return buf;
3205 *		errno = -retval;
3206 *		return NULL;
3207 *	}
3208 */
3209 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3210 {
3211 	int error;
3212 	struct path pwd, root;
3213 	char *page = __getname();
3214
3215 	if (!page)
3216 		return -ENOMEM;
3217
3218 	rcu_read_lock();
3219 	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3220
3221 	error = -ENOENT;
3222 	if (!d_unlinked(pwd.dentry)) {
3223 		unsigned long len;
3224 		char *cwd = page + PATH_MAX;
3225 		int buflen = PATH_MAX;
3226
3227 		prepend(&cwd, &buflen, "\0", 1);
3228 		error = prepend_path(&pwd, &root, &cwd, &buflen);
3229 		rcu_read_unlock();
3230
3231 		if (error < 0)
3232 			goto out;
3233
3234 		/* Unreachable from current root */
3235 		if (error > 0) {
3236 			error = prepend_unreachable(&cwd, &buflen);
3237 			if (error)
3238 				goto out;
3239 		}
3240
3241 		error = -ERANGE;
3242 		len = PATH_MAX + page - cwd;
3243 		if (len <= size) {
3244 			error = len;
3245 			if (copy_to_user(buf, cwd, len))
3246 				error = -EFAULT;
3247 		}
3248 	} else {
3249 		rcu_read_unlock();
3250 	}
3251
3252 out:
3253 	__putname(page);
3254 	return error;
3255 }
3256
3257 /*
3258 * Test whether new_dentry is a subdirectory of old_dentry.
3259 *
3260 * Trivially implemented using the dcache structure.
3261 */
3262
3263 /**
3264 * is_subdir - is new dentry a subdirectory of old_dentry
3265 * @new_dentry: new dentry
3266 * @old_dentry: old dentry
3267 *
3268 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3269 * Returns 0 otherwise.
3270 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3271 */
3272
3273 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3274 {
3275 	int result;
3276 	unsigned seq;
3277
3278 	if (new_dentry == old_dentry)
3279 		return 1;
3280
3281 	do {
3282 		/* for restarting inner loop in case of seq retry */
3283 		seq = read_seqbegin(&rename_lock);
3284 		/*
3285 		 * Need rcu_read_lock() to protect against d_parent being
3286 		 * changed under us by d_move().
3287 		 */
3288 		rcu_read_lock();
3289 		if (d_ancestor(old_dentry, new_dentry))
3290 			result = 1;
3291 		else
3292 			result = 0;
3293 		rcu_read_unlock();
3294 	} while (read_seqretry(&rename_lock, seq));
3295
3296 	return result;
3297 }
3298
3299 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3300 {
3301 	struct dentry *root = data;
3302 	if (dentry != root) {
3303 		if (d_unhashed(dentry) || !dentry->d_inode)
3304 			return D_WALK_SKIP;
3305
3306 		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3307 			dentry->d_flags |= DCACHE_GENOCIDE;
3308 			dentry->d_lockref.count--;
3309 		}
3310 	}
3311 	return D_WALK_CONTINUE;
3312 }
3313
3314 void d_genocide(struct dentry *parent)
3315 {
3316 	d_walk(parent, parent, d_genocide_kill, NULL);
3317 }
3318
3319 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3320 {
3321 	inode_dec_link_count(inode);
3322 	BUG_ON(dentry->d_name.name != dentry->d_iname ||
3323 		!hlist_unhashed(&dentry->d_alias) ||
3324 		!d_unlinked(dentry));
3325 	spin_lock(&dentry->d_parent->d_lock);
3326 	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3327 	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3328 				(unsigned long long)inode->i_ino);
3329 	spin_unlock(&dentry->d_lock);
3330 	spin_unlock(&dentry->d_parent->d_lock);
3331 	d_instantiate(dentry, inode);
3332 }
3333 EXPORT_SYMBOL(d_tmpfile);
3334
3335 static __initdata unsigned long dhash_entries;
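/*
 * "dhash_entries=" on the kernel command line overrides the computed
 * dentry hash table size, e.g. (illustrative value):
 *
 *	dhash_entries=1048576
 */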
static int __init set_dhash_entries(char *str) 3337 { 3338 if (!str) 3339 return 0; 3340 dhash_entries = simple_strtoul(str, &str, 0); 3341 return 1; 3342 } 3343 __setup("dhash_entries=", set_dhash_entries); 3344 3345 static void __init dcache_init_early(void) 3346 { 3347 unsigned int loop; 3348 3349 /* If hashes are distributed across NUMA nodes, defer 3350 * hash allocation until vmalloc space is available. 3351 */ 3352 if (hashdist) 3353 return; 3354 3355 dentry_hashtable = 3356 alloc_large_system_hash("Dentry cache", 3357 sizeof(struct hlist_bl_head), 3358 dhash_entries, 3359 13, 3360 HASH_EARLY, 3361 &d_hash_shift, 3362 &d_hash_mask, 3363 0, 3364 0); 3365 3366 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3367 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3368 } 3369 3370 static void __init dcache_init(void) 3371 { 3372 unsigned int loop; 3373 3374 /* 3375 * A constructor could be added for stable state like the lists, 3376 * but it is probably not worth it because of the cache nature 3377 * of the dcache. 3378 */ 3379 dentry_cache = KMEM_CACHE(dentry, 3380 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3381 3382 /* Hash may have been set up in dcache_init_early */ 3383 if (!hashdist) 3384 return; 3385 3386 dentry_hashtable = 3387 alloc_large_system_hash("Dentry cache", 3388 sizeof(struct hlist_bl_head), 3389 dhash_entries, 3390 13, 3391 0, 3392 &d_hash_shift, 3393 &d_hash_mask, 3394 0, 3395 0); 3396 3397 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3398 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3399 } 3400 3401 /* SLAB cache for __getname() consumers */ 3402 struct kmem_cache *names_cachep __read_mostly; 3403 EXPORT_SYMBOL(names_cachep); 3404 3405 EXPORT_SYMBOL(d_genocide); 3406 3407 void __init vfs_caches_init_early(void) 3408 { 3409 dcache_init_early(); 3410 inode_init_early(); 3411 } 3412 3413 void __init vfs_caches_init(unsigned long mempages) 3414 { 3415 unsigned long reserve; 3416 3417 /* Base hash sizes on available memory, with a reserve equal to 3418 150% of current kernel size */ 3419 3420 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3421 mempages -= reserve; 3422 3423 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3424 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3425 3426 dcache_init(); 3427 inode_init(); 3428 files_init(mempages); 3429 mnt_init(); 3430 bdev_cache_init(); 3431 chrdev_init(); 3432 } 3433