1 /* 2 * fs/dcache.c 3 * 4 * Complete reimplementation 5 * (C) 1997 Thomas Schoebel-Theuer, 6 * with heavy changes by Linus Torvalds 7 */ 8 9 /* 10 * Notes on the allocation strategy: 11 * 12 * The dcache is a master of the icache - whenever a dcache entry 13 * exists, the inode will always exist. "iput()" is done either when 14 * the dcache entry is deleted or garbage collected. 15 */ 16 17 #include <linux/syscalls.h> 18 #include <linux/string.h> 19 #include <linux/mm.h> 20 #include <linux/fs.h> 21 #include <linux/fsnotify.h> 22 #include <linux/slab.h> 23 #include <linux/init.h> 24 #include <linux/hash.h> 25 #include <linux/cache.h> 26 #include <linux/export.h> 27 #include <linux/mount.h> 28 #include <linux/file.h> 29 #include <asm/uaccess.h> 30 #include <linux/security.h> 31 #include <linux/seqlock.h> 32 #include <linux/swap.h> 33 #include <linux/bootmem.h> 34 #include <linux/fs_struct.h> 35 #include <linux/hardirq.h> 36 #include <linux/bit_spinlock.h> 37 #include <linux/rculist_bl.h> 38 #include <linux/prefetch.h> 39 #include <linux/ratelimit.h> 40 #include <linux/list_lru.h> 41 #include "internal.h" 42 #include "mount.h" 43 44 /* 45 * Usage: 46 * dcache->d_inode->i_lock protects: 47 * - i_dentry, d_alias, d_inode of aliases 48 * dcache_hash_bucket lock protects: 49 * - the dcache hash table 50 * s_anon bl list spinlock protects: 51 * - the s_anon list (see __d_drop) 52 * dentry->d_sb->s_dentry_lru_lock protects: 53 * - the dcache lru lists and counters 54 * d_lock protects: 55 * - d_flags 56 * - d_name 57 * - d_lru 58 * - d_count 59 * - d_unhashed() 60 * - d_parent and d_subdirs 61 * - childrens' d_child and d_parent 62 * - d_alias, d_inode 63 * 64 * Ordering: 65 * dentry->d_inode->i_lock 66 * dentry->d_lock 67 * dentry->d_sb->s_dentry_lru_lock 68 * dcache_hash_bucket lock 69 * s_anon lock 70 * 71 * If there is an ancestor relationship: 72 * dentry->d_parent->...->d_parent->d_lock 73 * ... 74 * dentry->d_parent->d_lock 75 * dentry->d_lock 76 * 77 * If no ancestor relationship: 78 * if (dentry1 < dentry2) 79 * dentry1->d_lock 80 * dentry2->d_lock 81 */ 82 int sysctl_vfs_cache_pressure __read_mostly = 100; 83 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 84 85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 86 87 EXPORT_SYMBOL(rename_lock); 88 89 static struct kmem_cache *dentry_cache __read_mostly; 90 91 /* 92 * This is the single most critical data structure when it comes 93 * to the dcache: the hashtable for lookups. Somebody should try 94 * to make this good - I've just made it work. 95 * 96 * This hash-function tries to avoid losing too many bits of hash 97 * information, yet avoid using a prime hash-size or similar. 98 */ 99 100 static unsigned int d_hash_mask __read_mostly; 101 static unsigned int d_hash_shift __read_mostly; 102 103 static struct hlist_bl_head *dentry_hashtable __read_mostly; 104 105 static inline struct hlist_bl_head *d_hash(const struct dentry *parent, 106 unsigned int hash) 107 { 108 hash += (unsigned long) parent / L1_CACHE_BYTES; 109 return dentry_hashtable + hash_32(hash, d_hash_shift); 110 } 111 112 /* Statistics gathering. */ 113 struct dentry_stat_t dentry_stat = { 114 .age_limit = 45, 115 }; 116 117 static DEFINE_PER_CPU(long, nr_dentry); 118 static DEFINE_PER_CPU(long, nr_dentry_unused); 119 120 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 121 122 /* 123 * Here we resort to our own counters instead of using generic per-cpu counters 124 * for consistency with what the vfs inode code does. 
We are expected to harvest 125 * better code and performance by having our own specialized counters. 126 * 127 * Please note that the loop is done over all possible CPUs, not over all online 128 * CPUs. The reason for this is that we don't want to play games with CPUs going 129 * on and off. If one of them goes off, we will just keep their counters. 130 * 131 * glommer: See cffbc8a for details, and if you ever intend to change this, 132 * please update all vfs counters to match. 133 */ 134 static long get_nr_dentry(void) 135 { 136 int i; 137 long sum = 0; 138 for_each_possible_cpu(i) 139 sum += per_cpu(nr_dentry, i); 140 return sum < 0 ? 0 : sum; 141 } 142 143 static long get_nr_dentry_unused(void) 144 { 145 int i; 146 long sum = 0; 147 for_each_possible_cpu(i) 148 sum += per_cpu(nr_dentry_unused, i); 149 return sum < 0 ? 0 : sum; 150 } 151 152 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, 153 size_t *lenp, loff_t *ppos) 154 { 155 dentry_stat.nr_dentry = get_nr_dentry(); 156 dentry_stat.nr_unused = get_nr_dentry_unused(); 157 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 158 } 159 #endif 160 161 /* 162 * Compare 2 name strings, return 0 if they match, otherwise non-zero. 163 * The strings are both count bytes long, and count is non-zero. 164 */ 165 #ifdef CONFIG_DCACHE_WORD_ACCESS 166 167 #include <asm/word-at-a-time.h> 168 /* 169 * NOTE! 'cs' and 'scount' come from a dentry, so it has a 170 * aligned allocation for this particular component. We don't 171 * strictly need the load_unaligned_zeropad() safety, but it 172 * doesn't hurt either. 173 * 174 * In contrast, 'ct' and 'tcount' can be from a pathname, and do 175 * need the careful unaligned handling. 176 */ 177 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 178 { 179 unsigned long a,b,mask; 180 181 for (;;) { 182 a = *(unsigned long *)cs; 183 b = load_unaligned_zeropad(ct); 184 if (tcount < sizeof(unsigned long)) 185 break; 186 if (unlikely(a != b)) 187 return 1; 188 cs += sizeof(unsigned long); 189 ct += sizeof(unsigned long); 190 tcount -= sizeof(unsigned long); 191 if (!tcount) 192 return 0; 193 } 194 mask = bytemask_from_count(tcount); 195 return unlikely(!!((a ^ b) & mask)); 196 } 197 198 #else 199 200 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 201 { 202 do { 203 if (*cs != *ct) 204 return 1; 205 cs++; 206 ct++; 207 tcount--; 208 } while (tcount); 209 return 0; 210 } 211 212 #endif 213 214 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) 215 { 216 const unsigned char *cs; 217 /* 218 * Be careful about RCU walk racing with rename: 219 * use ACCESS_ONCE to fetch the name pointer. 220 * 221 * NOTE! Even if a rename will mean that the length 222 * was not loaded atomically, we don't care. The 223 * RCU walk will check the sequence count eventually, 224 * and catch it. And we won't overrun the buffer, 225 * because we're reading the name pointer atomically, 226 * and a dentry name is guaranteed to be properly 227 * terminated with a NUL byte. 
228 * 229 * End result: even if 'len' is wrong, we'll exit 230 * early because the data cannot match (there can 231 * be no NUL in the ct/tcount data) 232 */ 233 cs = ACCESS_ONCE(dentry->d_name.name); 234 smp_read_barrier_depends(); 235 return dentry_string_cmp(cs, ct, tcount); 236 } 237 238 struct external_name { 239 union { 240 atomic_t count; 241 struct rcu_head head; 242 } u; 243 unsigned char name[]; 244 }; 245 246 static inline struct external_name *external_name(struct dentry *dentry) 247 { 248 return container_of(dentry->d_name.name, struct external_name, name[0]); 249 } 250 251 static void __d_free(struct rcu_head *head) 252 { 253 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 254 255 WARN_ON(!hlist_unhashed(&dentry->d_alias)); 256 kmem_cache_free(dentry_cache, dentry); 257 } 258 259 static void __d_free_external(struct rcu_head *head) 260 { 261 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 262 WARN_ON(!hlist_unhashed(&dentry->d_alias)); 263 kfree(external_name(dentry)); 264 kmem_cache_free(dentry_cache, dentry); 265 } 266 267 static void dentry_free(struct dentry *dentry) 268 { 269 if (unlikely(dname_external(dentry))) { 270 struct external_name *p = external_name(dentry); 271 if (likely(atomic_dec_and_test(&p->u.count))) { 272 call_rcu(&dentry->d_u.d_rcu, __d_free_external); 273 return; 274 } 275 } 276 /* if dentry was never visible to RCU, immediate free is OK */ 277 if (!(dentry->d_flags & DCACHE_RCUACCESS)) 278 __d_free(&dentry->d_u.d_rcu); 279 else 280 call_rcu(&dentry->d_u.d_rcu, __d_free); 281 } 282 283 /** 284 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups 285 * @dentry: the target dentry 286 * After this call, in-progress rcu-walk path lookup will fail. This 287 * should be called after unhashing, and after changing d_inode (if 288 * the dentry has not already been unhashed). 289 */ 290 static inline void dentry_rcuwalk_barrier(struct dentry *dentry) 291 { 292 assert_spin_locked(&dentry->d_lock); 293 /* Go through a barrier */ 294 write_seqcount_barrier(&dentry->d_seq); 295 } 296 297 /* 298 * Release the dentry's inode, using the filesystem 299 * d_iput() operation if defined. Dentry has no refcount 300 * and is unhashed. 301 */ 302 static void dentry_iput(struct dentry * dentry) 303 __releases(dentry->d_lock) 304 __releases(dentry->d_inode->i_lock) 305 { 306 struct inode *inode = dentry->d_inode; 307 if (inode) { 308 dentry->d_inode = NULL; 309 hlist_del_init(&dentry->d_alias); 310 spin_unlock(&dentry->d_lock); 311 spin_unlock(&inode->i_lock); 312 if (!inode->i_nlink) 313 fsnotify_inoderemove(inode); 314 if (dentry->d_op && dentry->d_op->d_iput) 315 dentry->d_op->d_iput(dentry, inode); 316 else 317 iput(inode); 318 } else { 319 spin_unlock(&dentry->d_lock); 320 } 321 } 322 323 /* 324 * Release the dentry's inode, using the filesystem 325 * d_iput() operation if defined. dentry remains in-use. 
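 *
 * For reference, a filesystem that keeps per-dentry state it must drop
 * together with the inode can supply ->d_iput in its dentry_operations.
 * A rough, hypothetical sketch (examplefs and its use of d_fsdata are
 * made up; the hook is responsible for the final iput() itself):
 *
 *	static void examplefs_d_iput(struct dentry *dentry, struct inode *inode)
 *	{
 *		kfree(dentry->d_fsdata);
 *		dentry->d_fsdata = NULL;
 *		iput(inode);
 *	}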
326 */ 327 static void dentry_unlink_inode(struct dentry * dentry) 328 __releases(dentry->d_lock) 329 __releases(dentry->d_inode->i_lock) 330 { 331 struct inode *inode = dentry->d_inode; 332 __d_clear_type(dentry); 333 dentry->d_inode = NULL; 334 hlist_del_init(&dentry->d_alias); 335 dentry_rcuwalk_barrier(dentry); 336 spin_unlock(&dentry->d_lock); 337 spin_unlock(&inode->i_lock); 338 if (!inode->i_nlink) 339 fsnotify_inoderemove(inode); 340 if (dentry->d_op && dentry->d_op->d_iput) 341 dentry->d_op->d_iput(dentry, inode); 342 else 343 iput(inode); 344 } 345 346 /* 347 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry 348 * is in use - which includes both the "real" per-superblock 349 * LRU list _and_ the DCACHE_SHRINK_LIST use. 350 * 351 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is 352 * on the shrink list (ie not on the superblock LRU list). 353 * 354 * The per-cpu "nr_dentry_unused" counters are updated with 355 * the DCACHE_LRU_LIST bit. 356 * 357 * These helper functions make sure we always follow the 358 * rules. d_lock must be held by the caller. 359 */ 360 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x)) 361 static void d_lru_add(struct dentry *dentry) 362 { 363 D_FLAG_VERIFY(dentry, 0); 364 dentry->d_flags |= DCACHE_LRU_LIST; 365 this_cpu_inc(nr_dentry_unused); 366 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 367 } 368 369 static void d_lru_del(struct dentry *dentry) 370 { 371 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 372 dentry->d_flags &= ~DCACHE_LRU_LIST; 373 this_cpu_dec(nr_dentry_unused); 374 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 375 } 376 377 static void d_shrink_del(struct dentry *dentry) 378 { 379 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); 380 list_del_init(&dentry->d_lru); 381 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); 382 this_cpu_dec(nr_dentry_unused); 383 } 384 385 static void d_shrink_add(struct dentry *dentry, struct list_head *list) 386 { 387 D_FLAG_VERIFY(dentry, 0); 388 list_add(&dentry->d_lru, list); 389 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; 390 this_cpu_inc(nr_dentry_unused); 391 } 392 393 /* 394 * These can only be called under the global LRU lock, ie during the 395 * callback for freeing the LRU list. "isolate" removes it from the 396 * LRU lists entirely, while shrink_move moves it to the indicated 397 * private list. 398 */ 399 static void d_lru_isolate(struct dentry *dentry) 400 { 401 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 402 dentry->d_flags &= ~DCACHE_LRU_LIST; 403 this_cpu_dec(nr_dentry_unused); 404 list_del_init(&dentry->d_lru); 405 } 406 407 static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list) 408 { 409 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 410 dentry->d_flags |= DCACHE_SHRINK_LIST; 411 list_move_tail(&dentry->d_lru, list); 412 } 413 414 /* 415 * dentry_lru_(add|del)_list) must be called with d_lock held. 416 */ 417 static void dentry_lru_add(struct dentry *dentry) 418 { 419 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) 420 d_lru_add(dentry); 421 } 422 423 /** 424 * d_drop - drop a dentry 425 * @dentry: dentry to drop 426 * 427 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't 428 * be found through a VFS lookup any more. 
Note that this is different from 429 * deleting the dentry - d_delete will try to mark the dentry negative if 430 * possible, giving a successful _negative_ lookup, while d_drop will 431 * just make the cache lookup fail. 432 * 433 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some 434 * reason (NFS timeouts or autofs deletes). 435 * 436 * __d_drop requires dentry->d_lock. 437 */ 438 void __d_drop(struct dentry *dentry) 439 { 440 if (!d_unhashed(dentry)) { 441 struct hlist_bl_head *b; 442 /* 443 * Hashed dentries are normally on the dentry hashtable, 444 * with the exception of those newly allocated by 445 * d_obtain_alias, which are always IS_ROOT: 446 */ 447 if (unlikely(IS_ROOT(dentry))) 448 b = &dentry->d_sb->s_anon; 449 else 450 b = d_hash(dentry->d_parent, dentry->d_name.hash); 451 452 hlist_bl_lock(b); 453 __hlist_bl_del(&dentry->d_hash); 454 dentry->d_hash.pprev = NULL; 455 hlist_bl_unlock(b); 456 dentry_rcuwalk_barrier(dentry); 457 } 458 } 459 EXPORT_SYMBOL(__d_drop); 460 461 void d_drop(struct dentry *dentry) 462 { 463 spin_lock(&dentry->d_lock); 464 __d_drop(dentry); 465 spin_unlock(&dentry->d_lock); 466 } 467 EXPORT_SYMBOL(d_drop); 468 469 static void __dentry_kill(struct dentry *dentry) 470 { 471 struct dentry *parent = NULL; 472 bool can_free = true; 473 if (!IS_ROOT(dentry)) 474 parent = dentry->d_parent; 475 476 /* 477 * The dentry is now unrecoverably dead to the world. 478 */ 479 lockref_mark_dead(&dentry->d_lockref); 480 481 /* 482 * inform the fs via d_prune that this dentry is about to be 483 * unhashed and destroyed. 484 */ 485 if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry)) 486 dentry->d_op->d_prune(dentry); 487 488 if (dentry->d_flags & DCACHE_LRU_LIST) { 489 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) 490 d_lru_del(dentry); 491 } 492 /* if it was on the hash then remove it */ 493 __d_drop(dentry); 494 list_del(&dentry->d_u.d_child); 495 /* 496 * Inform d_walk() that we are no longer attached to the 497 * dentry tree 498 */ 499 dentry->d_flags |= DCACHE_DENTRY_KILLED; 500 if (parent) 501 spin_unlock(&parent->d_lock); 502 dentry_iput(dentry); 503 /* 504 * dentry_iput drops the locks, at which point nobody (except 505 * transient RCU lookups) can reach this dentry. 506 */ 507 BUG_ON((int)dentry->d_lockref.count > 0); 508 this_cpu_dec(nr_dentry); 509 if (dentry->d_op && dentry->d_op->d_release) 510 dentry->d_op->d_release(dentry); 511 512 spin_lock(&dentry->d_lock); 513 if (dentry->d_flags & DCACHE_SHRINK_LIST) { 514 dentry->d_flags |= DCACHE_MAY_FREE; 515 can_free = false; 516 } 517 spin_unlock(&dentry->d_lock); 518 if (likely(can_free)) 519 dentry_free(dentry); 520 } 521 522 /* 523 * Finish off a dentry we've decided to kill. 524 * dentry->d_lock must be held, returns with it unlocked. 525 * If ref is non-zero, then decrement the refcount too. 526 * Returns dentry requiring refcount drop, or NULL if we're done. 
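 *
 * The caller is expected to loop on the return value; dput() below does
 * essentially
 *
 *	kill_it:
 *		dentry = dentry_kill(dentry);
 *		if (dentry)
 *			goto repeat;
 *
 * where the returned dentry is either the parent (which now needs its own
 * reference dropped) or the same dentry again if the trylocks failed.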
527 */ 528 static struct dentry *dentry_kill(struct dentry *dentry) 529 __releases(dentry->d_lock) 530 { 531 struct inode *inode = dentry->d_inode; 532 struct dentry *parent = NULL; 533 534 if (inode && unlikely(!spin_trylock(&inode->i_lock))) 535 goto failed; 536 537 if (!IS_ROOT(dentry)) { 538 parent = dentry->d_parent; 539 if (unlikely(!spin_trylock(&parent->d_lock))) { 540 if (inode) 541 spin_unlock(&inode->i_lock); 542 goto failed; 543 } 544 } 545 546 __dentry_kill(dentry); 547 return parent; 548 549 failed: 550 spin_unlock(&dentry->d_lock); 551 cpu_relax(); 552 return dentry; /* try again with same dentry */ 553 } 554 555 static inline struct dentry *lock_parent(struct dentry *dentry) 556 { 557 struct dentry *parent = dentry->d_parent; 558 if (IS_ROOT(dentry)) 559 return NULL; 560 if (unlikely((int)dentry->d_lockref.count < 0)) 561 return NULL; 562 if (likely(spin_trylock(&parent->d_lock))) 563 return parent; 564 rcu_read_lock(); 565 spin_unlock(&dentry->d_lock); 566 again: 567 parent = ACCESS_ONCE(dentry->d_parent); 568 spin_lock(&parent->d_lock); 569 /* 570 * We can't blindly lock dentry until we are sure 571 * that we won't violate the locking order. 572 * Any changes of dentry->d_parent must have 573 * been done with parent->d_lock held, so 574 * spin_lock() above is enough of a barrier 575 * for checking if it's still our child. 576 */ 577 if (unlikely(parent != dentry->d_parent)) { 578 spin_unlock(&parent->d_lock); 579 goto again; 580 } 581 rcu_read_unlock(); 582 if (parent != dentry) 583 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 584 else 585 parent = NULL; 586 return parent; 587 } 588 589 /* 590 * This is dput 591 * 592 * This is complicated by the fact that we do not want to put 593 * dentries that are no longer on any hash chain on the unused 594 * list: we'd much rather just get rid of them immediately. 595 * 596 * However, that implies that we have to traverse the dentry 597 * tree upwards to the parents which might _also_ now be 598 * scheduled for deletion (it may have been only waiting for 599 * its last child to go away). 600 * 601 * This tail recursion is done by hand as we don't want to depend 602 * on the compiler to always get this right (gcc generally doesn't). 603 * Real recursion would eat up our stack space. 604 */ 605 606 /* 607 * dput - release a dentry 608 * @dentry: dentry to release 609 * 610 * Release a dentry. This will drop the usage count and if appropriate 611 * call the dentry unlink method as well as removing it from the queues and 612 * releasing its resources. If the parent dentries were scheduled for release 613 * they too may now get deleted. 614 */ 615 void dput(struct dentry *dentry) 616 { 617 if (unlikely(!dentry)) 618 return; 619 620 repeat: 621 if (lockref_put_or_lock(&dentry->d_lockref)) 622 return; 623 624 /* Unreachable? 
Get rid of it */ 625 if (unlikely(d_unhashed(dentry))) 626 goto kill_it; 627 628 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { 629 if (dentry->d_op->d_delete(dentry)) 630 goto kill_it; 631 } 632 633 if (!(dentry->d_flags & DCACHE_REFERENCED)) 634 dentry->d_flags |= DCACHE_REFERENCED; 635 dentry_lru_add(dentry); 636 637 dentry->d_lockref.count--; 638 spin_unlock(&dentry->d_lock); 639 return; 640 641 kill_it: 642 dentry = dentry_kill(dentry); 643 if (dentry) 644 goto repeat; 645 } 646 EXPORT_SYMBOL(dput); 647 648 649 /* This must be called with d_lock held */ 650 static inline void __dget_dlock(struct dentry *dentry) 651 { 652 dentry->d_lockref.count++; 653 } 654 655 static inline void __dget(struct dentry *dentry) 656 { 657 lockref_get(&dentry->d_lockref); 658 } 659 660 struct dentry *dget_parent(struct dentry *dentry) 661 { 662 int gotref; 663 struct dentry *ret; 664 665 /* 666 * Do optimistic parent lookup without any 667 * locking. 668 */ 669 rcu_read_lock(); 670 ret = ACCESS_ONCE(dentry->d_parent); 671 gotref = lockref_get_not_zero(&ret->d_lockref); 672 rcu_read_unlock(); 673 if (likely(gotref)) { 674 if (likely(ret == ACCESS_ONCE(dentry->d_parent))) 675 return ret; 676 dput(ret); 677 } 678 679 repeat: 680 /* 681 * Don't need rcu_dereference because we re-check it was correct under 682 * the lock. 683 */ 684 rcu_read_lock(); 685 ret = dentry->d_parent; 686 spin_lock(&ret->d_lock); 687 if (unlikely(ret != dentry->d_parent)) { 688 spin_unlock(&ret->d_lock); 689 rcu_read_unlock(); 690 goto repeat; 691 } 692 rcu_read_unlock(); 693 BUG_ON(!ret->d_lockref.count); 694 ret->d_lockref.count++; 695 spin_unlock(&ret->d_lock); 696 return ret; 697 } 698 EXPORT_SYMBOL(dget_parent); 699 700 /** 701 * d_find_alias - grab a hashed alias of inode 702 * @inode: inode in question 703 * 704 * If inode has a hashed alias, or is a directory and has any alias, 705 * acquire the reference to alias and return it. Otherwise return NULL. 706 * Notice that if inode is a directory there can be only one alias and 707 * it can be unhashed only if it has no children, or if it is the root 708 * of a filesystem, or if the directory was renamed and d_revalidate 709 * was the first vfs operation to notice. 710 * 711 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer 712 * any other hashed alias over that one. 713 */ 714 static struct dentry *__d_find_alias(struct inode *inode) 715 { 716 struct dentry *alias, *discon_alias; 717 718 again: 719 discon_alias = NULL; 720 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) { 721 spin_lock(&alias->d_lock); 722 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 723 if (IS_ROOT(alias) && 724 (alias->d_flags & DCACHE_DISCONNECTED)) { 725 discon_alias = alias; 726 } else { 727 __dget_dlock(alias); 728 spin_unlock(&alias->d_lock); 729 return alias; 730 } 731 } 732 spin_unlock(&alias->d_lock); 733 } 734 if (discon_alias) { 735 alias = discon_alias; 736 spin_lock(&alias->d_lock); 737 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 738 __dget_dlock(alias); 739 spin_unlock(&alias->d_lock); 740 return alias; 741 } 742 spin_unlock(&alias->d_lock); 743 goto again; 744 } 745 return NULL; 746 } 747 748 struct dentry *d_find_alias(struct inode *inode) 749 { 750 struct dentry *de = NULL; 751 752 if (!hlist_empty(&inode->i_dentry)) { 753 spin_lock(&inode->i_lock); 754 de = __d_find_alias(inode); 755 spin_unlock(&inode->i_lock); 756 } 757 return de; 758 } 759 EXPORT_SYMBOL(d_find_alias); 760 761 /* 762 * Try to kill dentries associated with this inode. 
763 * WARNING: you must own a reference to inode. 764 */ 765 void d_prune_aliases(struct inode *inode) 766 { 767 struct dentry *dentry; 768 restart: 769 spin_lock(&inode->i_lock); 770 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { 771 spin_lock(&dentry->d_lock); 772 if (!dentry->d_lockref.count) { 773 /* 774 * inform the fs via d_prune that this dentry 775 * is about to be unhashed and destroyed. 776 */ 777 if ((dentry->d_flags & DCACHE_OP_PRUNE) && 778 !d_unhashed(dentry)) 779 dentry->d_op->d_prune(dentry); 780 781 __dget_dlock(dentry); 782 __d_drop(dentry); 783 spin_unlock(&dentry->d_lock); 784 spin_unlock(&inode->i_lock); 785 dput(dentry); 786 goto restart; 787 } 788 spin_unlock(&dentry->d_lock); 789 } 790 spin_unlock(&inode->i_lock); 791 } 792 EXPORT_SYMBOL(d_prune_aliases); 793 794 static void shrink_dentry_list(struct list_head *list) 795 { 796 struct dentry *dentry, *parent; 797 798 while (!list_empty(list)) { 799 struct inode *inode; 800 dentry = list_entry(list->prev, struct dentry, d_lru); 801 spin_lock(&dentry->d_lock); 802 parent = lock_parent(dentry); 803 804 /* 805 * The dispose list is isolated and dentries are not accounted 806 * to the LRU here, so we can simply remove it from the list 807 * here regardless of whether it is referenced or not. 808 */ 809 d_shrink_del(dentry); 810 811 /* 812 * We found an inuse dentry which was not removed from 813 * the LRU because of laziness during lookup. Do not free it. 814 */ 815 if ((int)dentry->d_lockref.count > 0) { 816 spin_unlock(&dentry->d_lock); 817 if (parent) 818 spin_unlock(&parent->d_lock); 819 continue; 820 } 821 822 823 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { 824 bool can_free = dentry->d_flags & DCACHE_MAY_FREE; 825 spin_unlock(&dentry->d_lock); 826 if (parent) 827 spin_unlock(&parent->d_lock); 828 if (can_free) 829 dentry_free(dentry); 830 continue; 831 } 832 833 inode = dentry->d_inode; 834 if (inode && unlikely(!spin_trylock(&inode->i_lock))) { 835 d_shrink_add(dentry, list); 836 spin_unlock(&dentry->d_lock); 837 if (parent) 838 spin_unlock(&parent->d_lock); 839 continue; 840 } 841 842 __dentry_kill(dentry); 843 844 /* 845 * We need to prune ancestors too. This is necessary to prevent 846 * quadratic behavior of shrink_dcache_parent(), but is also 847 * expected to be beneficial in reducing dentry cache 848 * fragmentation. 849 */ 850 dentry = parent; 851 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) { 852 parent = lock_parent(dentry); 853 if (dentry->d_lockref.count != 1) { 854 dentry->d_lockref.count--; 855 spin_unlock(&dentry->d_lock); 856 if (parent) 857 spin_unlock(&parent->d_lock); 858 break; 859 } 860 inode = dentry->d_inode; /* can't be NULL */ 861 if (unlikely(!spin_trylock(&inode->i_lock))) { 862 spin_unlock(&dentry->d_lock); 863 if (parent) 864 spin_unlock(&parent->d_lock); 865 cpu_relax(); 866 continue; 867 } 868 __dentry_kill(dentry); 869 dentry = parent; 870 } 871 } 872 } 873 874 static enum lru_status 875 dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) 876 { 877 struct list_head *freeable = arg; 878 struct dentry *dentry = container_of(item, struct dentry, d_lru); 879 880 881 /* 882 * we are inverting the lru lock/dentry->d_lock here, 883 * so use a trylock. If we fail to get the lock, just skip 884 * it 885 */ 886 if (!spin_trylock(&dentry->d_lock)) 887 return LRU_SKIP; 888 889 /* 890 * Referenced dentries are still in use. If they have active 891 * counts, just remove them from the LRU. 
Otherwise give them 892 * another pass through the LRU. 893 */ 894 if (dentry->d_lockref.count) { 895 d_lru_isolate(dentry); 896 spin_unlock(&dentry->d_lock); 897 return LRU_REMOVED; 898 } 899 900 if (dentry->d_flags & DCACHE_REFERENCED) { 901 dentry->d_flags &= ~DCACHE_REFERENCED; 902 spin_unlock(&dentry->d_lock); 903 904 /* 905 * The list move itself will be made by the common LRU code. At 906 * this point, we've dropped the dentry->d_lock but keep the 907 * lru lock. This is safe to do, since every list movement is 908 * protected by the lru lock even if both locks are held. 909 * 910 * This is guaranteed by the fact that all LRU management 911 * functions are intermediated by the LRU API calls like 912 * list_lru_add and list_lru_del. List movement in this file 913 * only ever occur through this functions or through callbacks 914 * like this one, that are called from the LRU API. 915 * 916 * The only exceptions to this are functions like 917 * shrink_dentry_list, and code that first checks for the 918 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be 919 * operating only with stack provided lists after they are 920 * properly isolated from the main list. It is thus, always a 921 * local access. 922 */ 923 return LRU_ROTATE; 924 } 925 926 d_lru_shrink_move(dentry, freeable); 927 spin_unlock(&dentry->d_lock); 928 929 return LRU_REMOVED; 930 } 931 932 /** 933 * prune_dcache_sb - shrink the dcache 934 * @sb: superblock 935 * @nr_to_scan : number of entries to try to free 936 * @nid: which node to scan for freeable entities 937 * 938 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is 939 * done when we need more memory an called from the superblock shrinker 940 * function. 941 * 942 * This function may fail to free any resources if all the dentries are in 943 * use. 944 */ 945 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, 946 int nid) 947 { 948 LIST_HEAD(dispose); 949 long freed; 950 951 freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate, 952 &dispose, &nr_to_scan); 953 shrink_dentry_list(&dispose); 954 return freed; 955 } 956 957 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, 958 spinlock_t *lru_lock, void *arg) 959 { 960 struct list_head *freeable = arg; 961 struct dentry *dentry = container_of(item, struct dentry, d_lru); 962 963 /* 964 * we are inverting the lru lock/dentry->d_lock here, 965 * so use a trylock. If we fail to get the lock, just skip 966 * it 967 */ 968 if (!spin_trylock(&dentry->d_lock)) 969 return LRU_SKIP; 970 971 d_lru_shrink_move(dentry, freeable); 972 spin_unlock(&dentry->d_lock); 973 974 return LRU_REMOVED; 975 } 976 977 978 /** 979 * shrink_dcache_sb - shrink dcache for a superblock 980 * @sb: superblock 981 * 982 * Shrink the dcache for the specified super block. This is used to free 983 * the dcache before unmounting a file system. 
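 *
 * Usage is a single call:
 *
 *	shrink_dcache_sb(sb);
 *
 * The implementation below keeps walking the superblock LRU and disposing
 * of whatever it isolates until a whole pass frees nothing.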
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE: continue walk
 * @D_WALK_QUIT: quit walk
 * @D_WALK_NORETRY: quit when retry is needed
 * @D_WALK_SKIP: skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent: start of walk
 * @data: data passed to @enter() and @finish()
 * @enter: callback when first entering the dentry
 * @finish: callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
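	 *
	 * Note that rcu_read_lock() is taken before dropping the child's
	 * lock so that 'child' cannot be freed while we re-take the
	 * parent's lock and re-check child->d_parent; if a rename or a
	 * kill raced with us we bail out to rename_retry below.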
1086 */ 1087 if (this_parent != parent) { 1088 struct dentry *child = this_parent; 1089 this_parent = child->d_parent; 1090 1091 rcu_read_lock(); 1092 spin_unlock(&child->d_lock); 1093 spin_lock(&this_parent->d_lock); 1094 1095 /* 1096 * might go back up the wrong parent if we have had a rename 1097 * or deletion 1098 */ 1099 if (this_parent != child->d_parent || 1100 (child->d_flags & DCACHE_DENTRY_KILLED) || 1101 need_seqretry(&rename_lock, seq)) { 1102 spin_unlock(&this_parent->d_lock); 1103 rcu_read_unlock(); 1104 goto rename_retry; 1105 } 1106 rcu_read_unlock(); 1107 next = child->d_u.d_child.next; 1108 goto resume; 1109 } 1110 if (need_seqretry(&rename_lock, seq)) { 1111 spin_unlock(&this_parent->d_lock); 1112 goto rename_retry; 1113 } 1114 if (finish) 1115 finish(data); 1116 1117 out_unlock: 1118 spin_unlock(&this_parent->d_lock); 1119 done_seqretry(&rename_lock, seq); 1120 return; 1121 1122 rename_retry: 1123 if (!retry) 1124 return; 1125 seq = 1; 1126 goto again; 1127 } 1128 1129 /* 1130 * Search for at least 1 mount point in the dentry's subdirs. 1131 * We descend to the next level whenever the d_subdirs 1132 * list is non-empty and continue searching. 1133 */ 1134 1135 static enum d_walk_ret check_mount(void *data, struct dentry *dentry) 1136 { 1137 int *ret = data; 1138 if (d_mountpoint(dentry)) { 1139 *ret = 1; 1140 return D_WALK_QUIT; 1141 } 1142 return D_WALK_CONTINUE; 1143 } 1144 1145 /** 1146 * have_submounts - check for mounts over a dentry 1147 * @parent: dentry to check. 1148 * 1149 * Return true if the parent or its subdirectories contain 1150 * a mount point 1151 */ 1152 int have_submounts(struct dentry *parent) 1153 { 1154 int ret = 0; 1155 1156 d_walk(parent, &ret, check_mount, NULL); 1157 1158 return ret; 1159 } 1160 EXPORT_SYMBOL(have_submounts); 1161 1162 /* 1163 * Called by mount code to set a mountpoint and check if the mountpoint is 1164 * reachable (e.g. NFS can unhash a directory dentry and then the complete 1165 * subtree can become unreachable). 1166 * 1167 * Only one of d_invalidate() and d_set_mounted() must succeed. For 1168 * this reason take rename_lock and d_lock on dentry and ancestors. 1169 */ 1170 int d_set_mounted(struct dentry *dentry) 1171 { 1172 struct dentry *p; 1173 int ret = -ENOENT; 1174 write_seqlock(&rename_lock); 1175 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { 1176 /* Need exclusion wrt. d_invalidate() */ 1177 spin_lock(&p->d_lock); 1178 if (unlikely(d_unhashed(p))) { 1179 spin_unlock(&p->d_lock); 1180 goto out; 1181 } 1182 spin_unlock(&p->d_lock); 1183 } 1184 spin_lock(&dentry->d_lock); 1185 if (!d_unlinked(dentry)) { 1186 dentry->d_flags |= DCACHE_MOUNTED; 1187 ret = 0; 1188 } 1189 spin_unlock(&dentry->d_lock); 1190 out: 1191 write_sequnlock(&rename_lock); 1192 return ret; 1193 } 1194 1195 /* 1196 * Search the dentry child list of the specified parent, 1197 * and move any unused dentries to the end of the unused 1198 * list for prune_dcache(). We descend to the next level 1199 * whenever the d_subdirs list is non-empty and continue 1200 * searching. 1201 * 1202 * It returns zero iff there are no unused children, 1203 * otherwise it returns the number of children moved to 1204 * the end of the unused list. This may not be the total 1205 * number of unused children, because select_parent can 1206 * drop the lock and return early due to latency 1207 * constraints. 
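 *
 * The collection is driven through d_walk() with a struct select_data
 * cursor; shrink_dcache_parent() below loops on it roughly like this:
 *
 *	INIT_LIST_HEAD(&data.dispose);
 *	data.start = parent;
 *	data.found = 0;
 *	d_walk(parent, &data, select_collect, NULL);
 *	if (data.found)
 *		shrink_dentry_list(&data.dispose);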
1208 */ 1209 1210 struct select_data { 1211 struct dentry *start; 1212 struct list_head dispose; 1213 int found; 1214 }; 1215 1216 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) 1217 { 1218 struct select_data *data = _data; 1219 enum d_walk_ret ret = D_WALK_CONTINUE; 1220 1221 if (data->start == dentry) 1222 goto out; 1223 1224 if (dentry->d_flags & DCACHE_SHRINK_LIST) { 1225 data->found++; 1226 } else { 1227 if (dentry->d_flags & DCACHE_LRU_LIST) 1228 d_lru_del(dentry); 1229 if (!dentry->d_lockref.count) { 1230 d_shrink_add(dentry, &data->dispose); 1231 data->found++; 1232 } 1233 } 1234 /* 1235 * We can return to the caller if we have found some (this 1236 * ensures forward progress). We'll be coming back to find 1237 * the rest. 1238 */ 1239 if (!list_empty(&data->dispose)) 1240 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY; 1241 out: 1242 return ret; 1243 } 1244 1245 /** 1246 * shrink_dcache_parent - prune dcache 1247 * @parent: parent of entries to prune 1248 * 1249 * Prune the dcache to remove unused children of the parent dentry. 1250 */ 1251 void shrink_dcache_parent(struct dentry *parent) 1252 { 1253 for (;;) { 1254 struct select_data data; 1255 1256 INIT_LIST_HEAD(&data.dispose); 1257 data.start = parent; 1258 data.found = 0; 1259 1260 d_walk(parent, &data, select_collect, NULL); 1261 if (!data.found) 1262 break; 1263 1264 shrink_dentry_list(&data.dispose); 1265 cond_resched(); 1266 } 1267 } 1268 EXPORT_SYMBOL(shrink_dcache_parent); 1269 1270 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) 1271 { 1272 /* it has busy descendents; complain about those instead */ 1273 if (!list_empty(&dentry->d_subdirs)) 1274 return D_WALK_CONTINUE; 1275 1276 /* root with refcount 1 is fine */ 1277 if (dentry == _data && dentry->d_lockref.count == 1) 1278 return D_WALK_CONTINUE; 1279 1280 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} " 1281 " still in use (%d) [unmount of %s %s]\n", 1282 dentry, 1283 dentry->d_inode ? 
1284 dentry->d_inode->i_ino : 0UL, 1285 dentry, 1286 dentry->d_lockref.count, 1287 dentry->d_sb->s_type->name, 1288 dentry->d_sb->s_id); 1289 WARN_ON(1); 1290 return D_WALK_CONTINUE; 1291 } 1292 1293 static void do_one_tree(struct dentry *dentry) 1294 { 1295 shrink_dcache_parent(dentry); 1296 d_walk(dentry, dentry, umount_check, NULL); 1297 d_drop(dentry); 1298 dput(dentry); 1299 } 1300 1301 /* 1302 * destroy the dentries attached to a superblock on unmounting 1303 */ 1304 void shrink_dcache_for_umount(struct super_block *sb) 1305 { 1306 struct dentry *dentry; 1307 1308 WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked"); 1309 1310 dentry = sb->s_root; 1311 sb->s_root = NULL; 1312 do_one_tree(dentry); 1313 1314 while (!hlist_bl_empty(&sb->s_anon)) { 1315 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash)); 1316 do_one_tree(dentry); 1317 } 1318 } 1319 1320 struct detach_data { 1321 struct select_data select; 1322 struct dentry *mountpoint; 1323 }; 1324 static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry) 1325 { 1326 struct detach_data *data = _data; 1327 1328 if (d_mountpoint(dentry)) { 1329 __dget_dlock(dentry); 1330 data->mountpoint = dentry; 1331 return D_WALK_QUIT; 1332 } 1333 1334 return select_collect(&data->select, dentry); 1335 } 1336 1337 static void check_and_drop(void *_data) 1338 { 1339 struct detach_data *data = _data; 1340 1341 if (!data->mountpoint && !data->select.found) 1342 __d_drop(data->select.start); 1343 } 1344 1345 /** 1346 * d_invalidate - detach submounts, prune dcache, and drop 1347 * @dentry: dentry to invalidate (aka detach, prune and drop) 1348 * 1349 * Try to invalidate the dentry if it turns out to be 1350 * possible. If there are reasons not to delete it 1351 * return -EBUSY. On success return 0. 1352 * 1353 * no dcache lock. 1354 * 1355 * The final d_drop is done as an atomic operation relative to 1356 * rename_lock ensuring there are no races with d_set_mounted. This 1357 * ensures there are no unhashed dentries on the path to a mountpoint. 1358 */ 1359 int d_invalidate(struct dentry *dentry) 1360 { 1361 int ret = 0; 1362 1363 /* 1364 * If it's already been dropped, return OK. 1365 */ 1366 spin_lock(&dentry->d_lock); 1367 if (d_unhashed(dentry)) { 1368 spin_unlock(&dentry->d_lock); 1369 return 0; 1370 } 1371 spin_unlock(&dentry->d_lock); 1372 1373 /* Negative dentries can be dropped without further checks */ 1374 if (!dentry->d_inode) { 1375 d_drop(dentry); 1376 goto out; 1377 } 1378 1379 for (;;) { 1380 struct detach_data data; 1381 1382 data.mountpoint = NULL; 1383 INIT_LIST_HEAD(&data.select.dispose); 1384 data.select.start = dentry; 1385 data.select.found = 0; 1386 1387 d_walk(dentry, &data, detach_and_collect, check_and_drop); 1388 1389 if (data.select.found) 1390 shrink_dentry_list(&data.select.dispose); 1391 1392 if (data.mountpoint) { 1393 detach_mounts(data.mountpoint); 1394 dput(data.mountpoint); 1395 } 1396 1397 if (!data.mountpoint && !data.select.found) 1398 break; 1399 1400 cond_resched(); 1401 } 1402 1403 out: 1404 return ret; 1405 } 1406 EXPORT_SYMBOL(d_invalidate); 1407 1408 /** 1409 * __d_alloc - allocate a dcache entry 1410 * @sb: filesystem it will belong to 1411 * @name: qstr of the name 1412 * 1413 * Allocates a dentry. It returns %NULL if there is insufficient memory 1414 * available. On a success the dentry is returned. The name passed in is 1415 * copied and the copy passed in may be reused after this call. 
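 *
 * Because the name is copied, callers may pass a qstr that lives on the
 * stack and reuse it immediately; d_alloc_name() further down does
 * exactly that:
 *
 *	struct qstr q;
 *
 *	q.name = name;
 *	q.len = strlen(name);
 *	q.hash = full_name_hash(q.name, q.len);
 *	return d_alloc(parent, &q);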
1416 */ 1417 1418 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) 1419 { 1420 struct dentry *dentry; 1421 char *dname; 1422 1423 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 1424 if (!dentry) 1425 return NULL; 1426 1427 /* 1428 * We guarantee that the inline name is always NUL-terminated. 1429 * This way the memcpy() done by the name switching in rename 1430 * will still always have a NUL at the end, even if we might 1431 * be overwriting an internal NUL character 1432 */ 1433 dentry->d_iname[DNAME_INLINE_LEN-1] = 0; 1434 if (name->len > DNAME_INLINE_LEN-1) { 1435 size_t size = offsetof(struct external_name, name[1]); 1436 struct external_name *p = kmalloc(size + name->len, GFP_KERNEL); 1437 if (!p) { 1438 kmem_cache_free(dentry_cache, dentry); 1439 return NULL; 1440 } 1441 atomic_set(&p->u.count, 1); 1442 dname = p->name; 1443 } else { 1444 dname = dentry->d_iname; 1445 } 1446 1447 dentry->d_name.len = name->len; 1448 dentry->d_name.hash = name->hash; 1449 memcpy(dname, name->name, name->len); 1450 dname[name->len] = 0; 1451 1452 /* Make sure we always see the terminating NUL character */ 1453 smp_wmb(); 1454 dentry->d_name.name = dname; 1455 1456 dentry->d_lockref.count = 1; 1457 dentry->d_flags = 0; 1458 spin_lock_init(&dentry->d_lock); 1459 seqcount_init(&dentry->d_seq); 1460 dentry->d_inode = NULL; 1461 dentry->d_parent = dentry; 1462 dentry->d_sb = sb; 1463 dentry->d_op = NULL; 1464 dentry->d_fsdata = NULL; 1465 INIT_HLIST_BL_NODE(&dentry->d_hash); 1466 INIT_LIST_HEAD(&dentry->d_lru); 1467 INIT_LIST_HEAD(&dentry->d_subdirs); 1468 INIT_HLIST_NODE(&dentry->d_alias); 1469 INIT_LIST_HEAD(&dentry->d_u.d_child); 1470 d_set_d_op(dentry, dentry->d_sb->s_d_op); 1471 1472 this_cpu_inc(nr_dentry); 1473 1474 return dentry; 1475 } 1476 1477 /** 1478 * d_alloc - allocate a dcache entry 1479 * @parent: parent of entry to allocate 1480 * @name: qstr of the name 1481 * 1482 * Allocates a dentry. It returns %NULL if there is insufficient memory 1483 * available. On a success the dentry is returned. The name passed in is 1484 * copied and the copy passed in may be reused after this call. 1485 */ 1486 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) 1487 { 1488 struct dentry *dentry = __d_alloc(parent->d_sb, name); 1489 if (!dentry) 1490 return NULL; 1491 1492 spin_lock(&parent->d_lock); 1493 /* 1494 * don't need child lock because it is not subject 1495 * to concurrency here 1496 */ 1497 __dget_dlock(parent); 1498 dentry->d_parent = parent; 1499 list_add(&dentry->d_u.d_child, &parent->d_subdirs); 1500 spin_unlock(&parent->d_lock); 1501 1502 return dentry; 1503 } 1504 EXPORT_SYMBOL(d_alloc); 1505 1506 /** 1507 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) 1508 * @sb: the superblock 1509 * @name: qstr of the name 1510 * 1511 * For a filesystem that just pins its dentries in memory and never 1512 * performs lookups at all, return an unhashed IS_ROOT dentry. 
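 *
 * A rough usage sketch for such a filesystem (the name and the surrounding
 * error handling are illustrative, not taken from a real caller):
 *
 *	static const struct qstr name = QSTR_INIT("anon", 4);
 *	struct dentry *dentry = d_alloc_pseudo(sb, &name);
 *
 *	if (!dentry)
 *		return -ENOMEM;
 *	d_instantiate(dentry, inode);
 *
 * The result stays unhashed, so it can never be reached by path lookup.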
1513 */ 1514 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) 1515 { 1516 return __d_alloc(sb, name); 1517 } 1518 EXPORT_SYMBOL(d_alloc_pseudo); 1519 1520 struct dentry *d_alloc_name(struct dentry *parent, const char *name) 1521 { 1522 struct qstr q; 1523 1524 q.name = name; 1525 q.len = strlen(name); 1526 q.hash = full_name_hash(q.name, q.len); 1527 return d_alloc(parent, &q); 1528 } 1529 EXPORT_SYMBOL(d_alloc_name); 1530 1531 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) 1532 { 1533 WARN_ON_ONCE(dentry->d_op); 1534 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | 1535 DCACHE_OP_COMPARE | 1536 DCACHE_OP_REVALIDATE | 1537 DCACHE_OP_WEAK_REVALIDATE | 1538 DCACHE_OP_DELETE )); 1539 dentry->d_op = op; 1540 if (!op) 1541 return; 1542 if (op->d_hash) 1543 dentry->d_flags |= DCACHE_OP_HASH; 1544 if (op->d_compare) 1545 dentry->d_flags |= DCACHE_OP_COMPARE; 1546 if (op->d_revalidate) 1547 dentry->d_flags |= DCACHE_OP_REVALIDATE; 1548 if (op->d_weak_revalidate) 1549 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; 1550 if (op->d_delete) 1551 dentry->d_flags |= DCACHE_OP_DELETE; 1552 if (op->d_prune) 1553 dentry->d_flags |= DCACHE_OP_PRUNE; 1554 1555 } 1556 EXPORT_SYMBOL(d_set_d_op); 1557 1558 static unsigned d_flags_for_inode(struct inode *inode) 1559 { 1560 unsigned add_flags = DCACHE_FILE_TYPE; 1561 1562 if (!inode) 1563 return DCACHE_MISS_TYPE; 1564 1565 if (S_ISDIR(inode->i_mode)) { 1566 add_flags = DCACHE_DIRECTORY_TYPE; 1567 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { 1568 if (unlikely(!inode->i_op->lookup)) 1569 add_flags = DCACHE_AUTODIR_TYPE; 1570 else 1571 inode->i_opflags |= IOP_LOOKUP; 1572 } 1573 } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { 1574 if (unlikely(inode->i_op->follow_link)) 1575 add_flags = DCACHE_SYMLINK_TYPE; 1576 else 1577 inode->i_opflags |= IOP_NOFOLLOW; 1578 } 1579 1580 if (unlikely(IS_AUTOMOUNT(inode))) 1581 add_flags |= DCACHE_NEED_AUTOMOUNT; 1582 return add_flags; 1583 } 1584 1585 static void __d_instantiate(struct dentry *dentry, struct inode *inode) 1586 { 1587 unsigned add_flags = d_flags_for_inode(inode); 1588 1589 spin_lock(&dentry->d_lock); 1590 __d_set_type(dentry, add_flags); 1591 if (inode) 1592 hlist_add_head(&dentry->d_alias, &inode->i_dentry); 1593 dentry->d_inode = inode; 1594 dentry_rcuwalk_barrier(dentry); 1595 spin_unlock(&dentry->d_lock); 1596 fsnotify_d_instantiate(dentry, inode); 1597 } 1598 1599 /** 1600 * d_instantiate - fill in inode information for a dentry 1601 * @entry: dentry to complete 1602 * @inode: inode to attach to this dentry 1603 * 1604 * Fill in inode information in the entry. 1605 * 1606 * This turns negative dentries into productive full members 1607 * of society. 1608 * 1609 * NOTE! This assumes that the inode count has been incremented 1610 * (or otherwise set) by the caller to indicate that it is now 1611 * in use by the dcache. 1612 */ 1613 1614 void d_instantiate(struct dentry *entry, struct inode * inode) 1615 { 1616 BUG_ON(!hlist_unhashed(&entry->d_alias)); 1617 if (inode) 1618 spin_lock(&inode->i_lock); 1619 __d_instantiate(entry, inode); 1620 if (inode) 1621 spin_unlock(&inode->i_lock); 1622 security_d_instantiate(entry, inode); 1623 } 1624 EXPORT_SYMBOL(d_instantiate); 1625 1626 /** 1627 * d_instantiate_unique - instantiate a non-aliased dentry 1628 * @entry: dentry to instantiate 1629 * @inode: inode to attach to this dentry 1630 * 1631 * Fill in inode information in the entry. On success, it returns NULL. 
1632 * If an unhashed alias of "entry" already exists, then we return the 1633 * aliased dentry instead and drop one reference to inode. 1634 * 1635 * Note that in order to avoid conflicts with rename() etc, the caller 1636 * had better be holding the parent directory semaphore. 1637 * 1638 * This also assumes that the inode count has been incremented 1639 * (or otherwise set) by the caller to indicate that it is now 1640 * in use by the dcache. 1641 */ 1642 static struct dentry *__d_instantiate_unique(struct dentry *entry, 1643 struct inode *inode) 1644 { 1645 struct dentry *alias; 1646 int len = entry->d_name.len; 1647 const char *name = entry->d_name.name; 1648 unsigned int hash = entry->d_name.hash; 1649 1650 if (!inode) { 1651 __d_instantiate(entry, NULL); 1652 return NULL; 1653 } 1654 1655 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) { 1656 /* 1657 * Don't need alias->d_lock here, because aliases with 1658 * d_parent == entry->d_parent are not subject to name or 1659 * parent changes, because the parent inode i_mutex is held. 1660 */ 1661 if (alias->d_name.hash != hash) 1662 continue; 1663 if (alias->d_parent != entry->d_parent) 1664 continue; 1665 if (alias->d_name.len != len) 1666 continue; 1667 if (dentry_cmp(alias, name, len)) 1668 continue; 1669 __dget(alias); 1670 return alias; 1671 } 1672 1673 __d_instantiate(entry, inode); 1674 return NULL; 1675 } 1676 1677 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) 1678 { 1679 struct dentry *result; 1680 1681 BUG_ON(!hlist_unhashed(&entry->d_alias)); 1682 1683 if (inode) 1684 spin_lock(&inode->i_lock); 1685 result = __d_instantiate_unique(entry, inode); 1686 if (inode) 1687 spin_unlock(&inode->i_lock); 1688 1689 if (!result) { 1690 security_d_instantiate(entry, inode); 1691 return NULL; 1692 } 1693 1694 BUG_ON(!d_unhashed(result)); 1695 iput(inode); 1696 return result; 1697 } 1698 1699 EXPORT_SYMBOL(d_instantiate_unique); 1700 1701 /** 1702 * d_instantiate_no_diralias - instantiate a non-aliased dentry 1703 * @entry: dentry to complete 1704 * @inode: inode to attach to this dentry 1705 * 1706 * Fill in inode information in the entry. If a directory alias is found, then 1707 * return an error (and drop inode). Together with d_materialise_unique() this 1708 * guarantees that a directory inode may never have more than one alias. 
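 *
 * A caller therefore only has to propagate the error; the inode reference
 * has already been dropped on failure (illustrative sketch):
 *
 *	err = d_instantiate_no_diralias(dentry, inode);
 *	if (err)
 *		return err;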
1709 */ 1710 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode) 1711 { 1712 BUG_ON(!hlist_unhashed(&entry->d_alias)); 1713 1714 spin_lock(&inode->i_lock); 1715 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) { 1716 spin_unlock(&inode->i_lock); 1717 iput(inode); 1718 return -EBUSY; 1719 } 1720 __d_instantiate(entry, inode); 1721 spin_unlock(&inode->i_lock); 1722 security_d_instantiate(entry, inode); 1723 1724 return 0; 1725 } 1726 EXPORT_SYMBOL(d_instantiate_no_diralias); 1727 1728 struct dentry *d_make_root(struct inode *root_inode) 1729 { 1730 struct dentry *res = NULL; 1731 1732 if (root_inode) { 1733 static const struct qstr name = QSTR_INIT("/", 1); 1734 1735 res = __d_alloc(root_inode->i_sb, &name); 1736 if (res) 1737 d_instantiate(res, root_inode); 1738 else 1739 iput(root_inode); 1740 } 1741 return res; 1742 } 1743 EXPORT_SYMBOL(d_make_root); 1744 1745 static struct dentry * __d_find_any_alias(struct inode *inode) 1746 { 1747 struct dentry *alias; 1748 1749 if (hlist_empty(&inode->i_dentry)) 1750 return NULL; 1751 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 1752 __dget(alias); 1753 return alias; 1754 } 1755 1756 /** 1757 * d_find_any_alias - find any alias for a given inode 1758 * @inode: inode to find an alias for 1759 * 1760 * If any aliases exist for the given inode, take and return a 1761 * reference for one of them. If no aliases exist, return %NULL. 1762 */ 1763 struct dentry *d_find_any_alias(struct inode *inode) 1764 { 1765 struct dentry *de; 1766 1767 spin_lock(&inode->i_lock); 1768 de = __d_find_any_alias(inode); 1769 spin_unlock(&inode->i_lock); 1770 return de; 1771 } 1772 EXPORT_SYMBOL(d_find_any_alias); 1773 1774 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) 1775 { 1776 static const struct qstr anonstring = QSTR_INIT("/", 1); 1777 struct dentry *tmp; 1778 struct dentry *res; 1779 unsigned add_flags; 1780 1781 if (!inode) 1782 return ERR_PTR(-ESTALE); 1783 if (IS_ERR(inode)) 1784 return ERR_CAST(inode); 1785 1786 res = d_find_any_alias(inode); 1787 if (res) 1788 goto out_iput; 1789 1790 tmp = __d_alloc(inode->i_sb, &anonstring); 1791 if (!tmp) { 1792 res = ERR_PTR(-ENOMEM); 1793 goto out_iput; 1794 } 1795 1796 spin_lock(&inode->i_lock); 1797 res = __d_find_any_alias(inode); 1798 if (res) { 1799 spin_unlock(&inode->i_lock); 1800 dput(tmp); 1801 goto out_iput; 1802 } 1803 1804 /* attach a disconnected dentry */ 1805 add_flags = d_flags_for_inode(inode); 1806 1807 if (disconnected) 1808 add_flags |= DCACHE_DISCONNECTED; 1809 1810 spin_lock(&tmp->d_lock); 1811 tmp->d_inode = inode; 1812 tmp->d_flags |= add_flags; 1813 hlist_add_head(&tmp->d_alias, &inode->i_dentry); 1814 hlist_bl_lock(&tmp->d_sb->s_anon); 1815 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); 1816 hlist_bl_unlock(&tmp->d_sb->s_anon); 1817 spin_unlock(&tmp->d_lock); 1818 spin_unlock(&inode->i_lock); 1819 security_d_instantiate(tmp, inode); 1820 1821 return tmp; 1822 1823 out_iput: 1824 if (res && !IS_ERR(res)) 1825 security_d_instantiate(res, inode); 1826 iput(inode); 1827 return res; 1828 } 1829 1830 /** 1831 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode 1832 * @inode: inode to allocate the dentry for 1833 * 1834 * Obtain a dentry for an inode resulting from NFS filehandle conversion or 1835 * similar open by handle operations. The returned dentry may be anonymous, 1836 * or may have a full name (if the inode was already in the cache). 
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry. If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode: the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name: the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode; only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match, if the case-exact dentry already
 * exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (unlikely(IS_ERR(found)))
		goto err_out;
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			found = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative, use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed.
*/ 1928 BUG_ON(!is_bad_inode(inode)); 1929 BUG_ON(!is_bad_inode(found->d_inode)); 1930 } 1931 iput(inode); 1932 return found; 1933 } 1934 1935 /* 1936 * Negative dentry: instantiate it unless the inode is a directory and 1937 * already has a dentry. 1938 */ 1939 new = d_splice_alias(inode, found); 1940 if (new) { 1941 dput(found); 1942 found = new; 1943 } 1944 return found; 1945 1946 err_out: 1947 iput(inode); 1948 return found; 1949 } 1950 EXPORT_SYMBOL(d_add_ci); 1951 1952 /* 1953 * Do the slow-case of the dentry name compare. 1954 * 1955 * Unlike the dentry_cmp() function, we need to atomically 1956 * load the name and length information, so that the 1957 * filesystem can rely on them, and can use the 'name' and 1958 * 'len' information without worrying about walking off the 1959 * end of memory etc. 1960 * 1961 * Thus the read_seqcount_retry() and the "duplicate" info 1962 * in arguments (the low-level filesystem should not look 1963 * at the dentry inode or name contents directly, since 1964 * rename can change them while we're in RCU mode). 1965 */ 1966 enum slow_d_compare { 1967 D_COMP_OK, 1968 D_COMP_NOMATCH, 1969 D_COMP_SEQRETRY, 1970 }; 1971 1972 static noinline enum slow_d_compare slow_dentry_cmp( 1973 const struct dentry *parent, 1974 struct dentry *dentry, 1975 unsigned int seq, 1976 const struct qstr *name) 1977 { 1978 int tlen = dentry->d_name.len; 1979 const char *tname = dentry->d_name.name; 1980 1981 if (read_seqcount_retry(&dentry->d_seq, seq)) { 1982 cpu_relax(); 1983 return D_COMP_SEQRETRY; 1984 } 1985 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) 1986 return D_COMP_NOMATCH; 1987 return D_COMP_OK; 1988 } 1989 1990 /** 1991 * __d_lookup_rcu - search for a dentry (racy, store-free) 1992 * @parent: parent dentry 1993 * @name: qstr of name we wish to find 1994 * @seqp: returns d_seq value at the point where the dentry was found 1995 * Returns: dentry, or NULL 1996 * 1997 * __d_lookup_rcu is the dcache lookup function for rcu-walk name 1998 * resolution (store-free path walking) design described in 1999 * Documentation/filesystems/path-lookup.txt. 2000 * 2001 * This is not to be used outside core vfs. 2002 * 2003 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock 2004 * held, and rcu_read_lock held. The returned dentry must not be stored into 2005 * without taking d_lock and checking d_seq sequence count against @seq 2006 * returned here. 2007 * 2008 * A refcount may be taken on the found dentry with the d_rcu_to_refcount 2009 * function. 2010 * 2011 * Alternatively, __d_lookup_rcu may be called again to look up the child of 2012 * the returned dentry, so long as its parent's seqlock is checked after the 2013 * child is looked up. Thus, an interlocking stepping of sequence lock checks 2014 * is formed, giving integrity down the path walk. 2015 * 2016 * NOTE! The caller *has* to check the resulting dentry against the sequence 2017 * number we've returned before using any of the resulting dentry state! 
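 *
 * A simplified sketch of the calling convention (see lookup_fast() in
 * fs/namei.c for the real thing):
 *
 *	unsigned seq;
 *	struct dentry *dentry = __d_lookup_rcu(parent, &name, &seq);
 *
 *	if (dentry && read_seqcount_retry(&dentry->d_seq, seq))
 *		goto unlazy;
 *
 * where "unlazy" falls back to ref-walk mode.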
2018 */ 2019 struct dentry *__d_lookup_rcu(const struct dentry *parent, 2020 const struct qstr *name, 2021 unsigned *seqp) 2022 { 2023 u64 hashlen = name->hash_len; 2024 const unsigned char *str = name->name; 2025 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen)); 2026 struct hlist_bl_node *node; 2027 struct dentry *dentry; 2028 2029 /* 2030 * Note: There is significant duplication with __d_lookup_rcu which is 2031 * required to prevent single threaded performance regressions 2032 * especially on architectures where smp_rmb (in seqcounts) are costly. 2033 * Keep the two functions in sync. 2034 */ 2035 2036 /* 2037 * The hash list is protected using RCU. 2038 * 2039 * Carefully use d_seq when comparing a candidate dentry, to avoid 2040 * races with d_move(). 2041 * 2042 * It is possible that concurrent renames can mess up our list 2043 * walk here and result in missing our dentry, resulting in the 2044 * false-negative result. d_lookup() protects against concurrent 2045 * renames using rename_lock seqlock. 2046 * 2047 * See Documentation/filesystems/path-lookup.txt for more details. 2048 */ 2049 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 2050 unsigned seq; 2051 2052 seqretry: 2053 /* 2054 * The dentry sequence count protects us from concurrent 2055 * renames, and thus protects parent and name fields. 2056 * 2057 * The caller must perform a seqcount check in order 2058 * to do anything useful with the returned dentry. 2059 * 2060 * NOTE! We do a "raw" seqcount_begin here. That means that 2061 * we don't wait for the sequence count to stabilize if it 2062 * is in the middle of a sequence change. If we do the slow 2063 * dentry compare, we will do seqretries until it is stable, 2064 * and if we end up with a successful lookup, we actually 2065 * want to exit RCU lookup anyway. 2066 */ 2067 seq = raw_seqcount_begin(&dentry->d_seq); 2068 if (dentry->d_parent != parent) 2069 continue; 2070 if (d_unhashed(dentry)) 2071 continue; 2072 2073 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { 2074 if (dentry->d_name.hash != hashlen_hash(hashlen)) 2075 continue; 2076 *seqp = seq; 2077 switch (slow_dentry_cmp(parent, dentry, seq, name)) { 2078 case D_COMP_OK: 2079 return dentry; 2080 case D_COMP_NOMATCH: 2081 continue; 2082 default: 2083 goto seqretry; 2084 } 2085 } 2086 2087 if (dentry->d_name.hash_len != hashlen) 2088 continue; 2089 *seqp = seq; 2090 if (!dentry_cmp(dentry, str, hashlen_len(hashlen))) 2091 return dentry; 2092 } 2093 return NULL; 2094 } 2095 2096 /** 2097 * d_lookup - search for a dentry 2098 * @parent: parent dentry 2099 * @name: qstr of name we wish to find 2100 * Returns: dentry, or NULL 2101 * 2102 * d_lookup searches the children of the parent dentry for the name in 2103 * question. If the dentry is found its reference count is incremented and the 2104 * dentry is returned. The caller must use dput to free the entry when it has 2105 * finished using it. %NULL is returned if the dentry does not exist. 
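 *
 * A minimal usage sketch (illustrative only), assuming @q is a qstr that
 * has already been filled in and hashed for @parent:
 *
 *	child = d_lookup(parent, &q);
 *	if (child) {
 *		... use child ...
 *		dput(child);
 *	}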
2106 */ 2107 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) 2108 { 2109 struct dentry *dentry; 2110 unsigned seq; 2111 2112 do { 2113 seq = read_seqbegin(&rename_lock); 2114 dentry = __d_lookup(parent, name); 2115 if (dentry) 2116 break; 2117 } while (read_seqretry(&rename_lock, seq)); 2118 return dentry; 2119 } 2120 EXPORT_SYMBOL(d_lookup); 2121 2122 /** 2123 * __d_lookup - search for a dentry (racy) 2124 * @parent: parent dentry 2125 * @name: qstr of name we wish to find 2126 * Returns: dentry, or NULL 2127 * 2128 * __d_lookup is like d_lookup, however it may (rarely) return a 2129 * false-negative result due to unrelated rename activity. 2130 * 2131 * __d_lookup is slightly faster by avoiding rename_lock read seqlock, 2132 * however it must be used carefully, eg. with a following d_lookup in 2133 * the case of failure. 2134 * 2135 * __d_lookup callers must be commented. 2136 */ 2137 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) 2138 { 2139 unsigned int len = name->len; 2140 unsigned int hash = name->hash; 2141 const unsigned char *str = name->name; 2142 struct hlist_bl_head *b = d_hash(parent, hash); 2143 struct hlist_bl_node *node; 2144 struct dentry *found = NULL; 2145 struct dentry *dentry; 2146 2147 /* 2148 * Note: There is significant duplication with __d_lookup_rcu which is 2149 * required to prevent single threaded performance regressions 2150 * especially on architectures where smp_rmb (in seqcounts) are costly. 2151 * Keep the two functions in sync. 2152 */ 2153 2154 /* 2155 * The hash list is protected using RCU. 2156 * 2157 * Take d_lock when comparing a candidate dentry, to avoid races 2158 * with d_move(). 2159 * 2160 * It is possible that concurrent renames can mess up our list 2161 * walk here and result in missing our dentry, resulting in the 2162 * false-negative result. d_lookup() protects against concurrent 2163 * renames using rename_lock seqlock. 2164 * 2165 * See Documentation/filesystems/path-lookup.txt for more details. 2166 */ 2167 rcu_read_lock(); 2168 2169 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 2170 2171 if (dentry->d_name.hash != hash) 2172 continue; 2173 2174 spin_lock(&dentry->d_lock); 2175 if (dentry->d_parent != parent) 2176 goto next; 2177 if (d_unhashed(dentry)) 2178 goto next; 2179 2180 /* 2181 * It is safe to compare names since d_move() cannot 2182 * change the qstr (protected by d_lock). 2183 */ 2184 if (parent->d_flags & DCACHE_OP_COMPARE) { 2185 int tlen = dentry->d_name.len; 2186 const char *tname = dentry->d_name.name; 2187 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) 2188 goto next; 2189 } else { 2190 if (dentry->d_name.len != len) 2191 goto next; 2192 if (dentry_cmp(dentry, str, len)) 2193 goto next; 2194 } 2195 2196 dentry->d_lockref.count++; 2197 found = dentry; 2198 spin_unlock(&dentry->d_lock); 2199 break; 2200 next: 2201 spin_unlock(&dentry->d_lock); 2202 } 2203 rcu_read_unlock(); 2204 2205 return found; 2206 } 2207 2208 /** 2209 * d_hash_and_lookup - hash the qstr then search for a dentry 2210 * @dir: Directory to search in 2211 * @name: qstr of name we wish to find 2212 * 2213 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error) 2214 */ 2215 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) 2216 { 2217 /* 2218 * Check for a fs-specific hash function. Note that we must 2219 * calculate the standard hash first, as the d_op->d_hash() 2220 * routine may choose to leave the hash value unchanged. 
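	 *
	 * A caller-side sketch (illustrative only): build the qstr with
	 * QSTR_INIT() and let this helper compute ->hash and do the lookup:
	 *
	 *	struct qstr q = QSTR_INIT(name, strlen(name));
	 *	struct dentry *de = d_hash_and_lookup(dir, &q);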
*/ 2222 name->hash = full_name_hash(name->name, name->len); 2223 if (dir->d_flags & DCACHE_OP_HASH) { 2224 int err = dir->d_op->d_hash(dir, name); 2225 if (unlikely(err < 0)) 2226 return ERR_PTR(err); 2227 } 2228 return d_lookup(dir, name); 2229 } 2230 EXPORT_SYMBOL(d_hash_and_lookup); 2231 2232 /** 2233 * d_validate - verify dentry provided from insecure source (deprecated) 2234 * @dentry: The dentry alleged to be valid child of @dparent 2235 * @dparent: The parent dentry (known to be valid) 2236 * 2237 * An insecure source has sent us a dentry; here we verify it and dget() it. 2238 * This is used by ncpfs in its readdir implementation. 2239 * Zero is returned if the dentry is invalid. 2240 * 2241 * This function is slow for big directories, and is deprecated; do not use it. 2242 */ 2243 int d_validate(struct dentry *dentry, struct dentry *dparent) 2244 { 2245 struct dentry *child; 2246 2247 spin_lock(&dparent->d_lock); 2248 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { 2249 if (dentry == child) { 2250 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 2251 __dget_dlock(dentry); 2252 spin_unlock(&dentry->d_lock); 2253 spin_unlock(&dparent->d_lock); 2254 return 1; 2255 } 2256 } 2257 spin_unlock(&dparent->d_lock); 2258 2259 return 0; 2260 } 2261 EXPORT_SYMBOL(d_validate); 2262 2263 /* 2264 * When a file is deleted, we have two options: 2265 * - turn this dentry into a negative dentry 2266 * - unhash this dentry and free it. 2267 * 2268 * Usually, we want to just turn this into 2269 * a negative dentry, but if anybody else is 2270 * currently using the dentry or the inode 2271 * we can't do that and we fall back on removing 2272 * it from the hash queues and waiting for 2273 * it to be deleted later when it has no users. 2274 */ 2275 2276 /** 2277 * d_delete - delete a dentry 2278 * @dentry: The dentry to delete 2279 * 2280 * Turn the dentry into a negative dentry if possible, otherwise 2281 * remove it from the hash queues so it can be deleted later. 2282 */ 2283 2284 void d_delete(struct dentry * dentry) 2285 { 2286 struct inode *inode; 2287 int isdir = 0; 2288 /* 2289 * Are we the only user? 2290 */ 2291 again: 2292 spin_lock(&dentry->d_lock); 2293 inode = dentry->d_inode; 2294 isdir = S_ISDIR(inode->i_mode); 2295 if (dentry->d_lockref.count == 1) { 2296 if (!spin_trylock(&inode->i_lock)) { 2297 spin_unlock(&dentry->d_lock); 2298 cpu_relax(); 2299 goto again; 2300 } 2301 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2302 dentry_unlink_inode(dentry); 2303 fsnotify_nameremove(dentry, isdir); 2304 return; 2305 } 2306 2307 if (!d_unhashed(dentry)) 2308 __d_drop(dentry); 2309 2310 spin_unlock(&dentry->d_lock); 2311 2312 fsnotify_nameremove(dentry, isdir); 2313 } 2314 EXPORT_SYMBOL(d_delete); 2315 2316 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2317 { 2318 BUG_ON(!d_unhashed(entry)); 2319 hlist_bl_lock(b); 2320 entry->d_flags |= DCACHE_RCUACCESS; 2321 hlist_bl_add_head_rcu(&entry->d_hash, b); 2322 hlist_bl_unlock(b); 2323 } 2324 2325 static void _d_rehash(struct dentry * entry) 2326 { 2327 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2328 } 2329 2330 /** 2331 * d_rehash - add an entry back to the hash 2332 * @entry: dentry to add to the hash 2333 * 2334 * Adds a dentry to the hash according to its name.
2335 */ 2336 2337 void d_rehash(struct dentry * entry) 2338 { 2339 spin_lock(&entry->d_lock); 2340 _d_rehash(entry); 2341 spin_unlock(&entry->d_lock); 2342 } 2343 EXPORT_SYMBOL(d_rehash); 2344 2345 /** 2346 * dentry_update_name_case - update case insensitive dentry with a new name 2347 * @dentry: dentry to be updated 2348 * @name: new name 2349 * 2350 * Update a case insensitive dentry with new case of name. 2351 * 2352 * dentry must have been returned by d_lookup with name @name. Old and new 2353 * name lengths must match (ie. no d_compare which allows mismatched name 2354 * lengths). 2355 * 2356 * Parent inode i_mutex must be held over d_lookup and into this call (to 2357 * keep renames and concurrent inserts, and readdir(2) away). 2358 */ 2359 void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2360 { 2361 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); 2362 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2363 2364 spin_lock(&dentry->d_lock); 2365 write_seqcount_begin(&dentry->d_seq); 2366 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); 2367 write_seqcount_end(&dentry->d_seq); 2368 spin_unlock(&dentry->d_lock); 2369 } 2370 EXPORT_SYMBOL(dentry_update_name_case); 2371 2372 static void swap_names(struct dentry *dentry, struct dentry *target) 2373 { 2374 if (unlikely(dname_external(target))) { 2375 if (unlikely(dname_external(dentry))) { 2376 /* 2377 * Both external: swap the pointers 2378 */ 2379 swap(target->d_name.name, dentry->d_name.name); 2380 } else { 2381 /* 2382 * dentry:internal, target:external. Steal target's 2383 * storage and make target internal. 2384 */ 2385 memcpy(target->d_iname, dentry->d_name.name, 2386 dentry->d_name.len + 1); 2387 dentry->d_name.name = target->d_name.name; 2388 target->d_name.name = target->d_iname; 2389 } 2390 } else { 2391 if (unlikely(dname_external(dentry))) { 2392 /* 2393 * dentry:external, target:internal. Give dentry's 2394 * storage to target and make dentry internal 2395 */ 2396 memcpy(dentry->d_iname, target->d_name.name, 2397 target->d_name.len + 1); 2398 target->d_name.name = dentry->d_name.name; 2399 dentry->d_name.name = dentry->d_iname; 2400 } else { 2401 /* 2402 * Both are internal. 2403 */ 2404 unsigned int i; 2405 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); 2406 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { 2407 swap(((long *) &dentry->d_iname)[i], 2408 ((long *) &target->d_iname)[i]); 2409 } 2410 } 2411 } 2412 swap(dentry->d_name.hash_len, target->d_name.hash_len); 2413 } 2414 2415 static void copy_name(struct dentry *dentry, struct dentry *target) 2416 { 2417 struct external_name *old_name = NULL; 2418 if (unlikely(dname_external(dentry))) 2419 old_name = external_name(dentry); 2420 if (unlikely(dname_external(target))) { 2421 atomic_inc(&external_name(target)->u.count); 2422 dentry->d_name = target->d_name; 2423 } else { 2424 memcpy(dentry->d_iname, target->d_name.name, 2425 target->d_name.len + 1); 2426 dentry->d_name.name = dentry->d_iname; 2427 dentry->d_name.hash_len = target->d_name.hash_len; 2428 } 2429 if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) 2430 kfree_rcu(old_name, u.head); 2431 } 2432 2433 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) 2434 { 2435 /* 2436 * XXXX: do we really need to take target->d_lock? 
*/ 2438 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) 2439 spin_lock(&target->d_parent->d_lock); 2440 else { 2441 if (d_ancestor(dentry->d_parent, target->d_parent)) { 2442 spin_lock(&dentry->d_parent->d_lock); 2443 spin_lock_nested(&target->d_parent->d_lock, 2444 DENTRY_D_LOCK_NESTED); 2445 } else { 2446 spin_lock(&target->d_parent->d_lock); 2447 spin_lock_nested(&dentry->d_parent->d_lock, 2448 DENTRY_D_LOCK_NESTED); 2449 } 2450 } 2451 if (target < dentry) { 2452 spin_lock_nested(&target->d_lock, 2); 2453 spin_lock_nested(&dentry->d_lock, 3); 2454 } else { 2455 spin_lock_nested(&dentry->d_lock, 2); 2456 spin_lock_nested(&target->d_lock, 3); 2457 } 2458 } 2459 2460 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target) 2461 { 2462 if (target->d_parent != dentry->d_parent) 2463 spin_unlock(&dentry->d_parent->d_lock); 2464 if (target->d_parent != target) 2465 spin_unlock(&target->d_parent->d_lock); 2466 spin_unlock(&target->d_lock); 2467 spin_unlock(&dentry->d_lock); 2468 } 2469 2470 /* 2471 * When switching names, the actual string doesn't strictly have to 2472 * be preserved in the target - because we're dropping the target 2473 * anyway. As such, we can just do a simple memcpy() to copy over 2474 * the new name before we switch, unless we are going to rehash 2475 * it. Note that if we *do* unhash the target, we are not allowed 2476 * to rehash it without giving it a new name/hash key - whether 2477 * we swap or overwrite the names here, the resulting name won't match 2478 * reality in the filesystem; it's only there for d_path() purposes. 2479 * Note that all of this is happening under rename_lock, so 2480 * any hash lookup seeing it in the middle of manipulations will 2481 * be discarded anyway. So we do not care what happens to the hash 2482 * key in that case. 2483 */ 2484 /* 2485 * __d_move - move a dentry 2486 * @dentry: entry to move 2487 * @target: new dentry 2488 * @exchange: exchange the two dentries 2489 * 2490 * Update the dcache to reflect the move of a file name. Negative 2491 * dcache entries should not be moved in this way. Caller must hold 2492 * rename_lock, the i_mutex of the source and target directories, 2493 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). 2494 */ 2495 static void __d_move(struct dentry *dentry, struct dentry *target, 2496 bool exchange) 2497 { 2498 if (!dentry->d_inode) 2499 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2500 2501 BUG_ON(d_ancestor(dentry, target)); 2502 BUG_ON(d_ancestor(target, dentry)); 2503 2504 dentry_lock_for_move(dentry, target); 2505 2506 write_seqcount_begin(&dentry->d_seq); 2507 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); 2508 2509 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ 2510 2511 /* 2512 * Move the dentry to the target hash queue. Don't bother checking 2513 * for the same hash queue because of how unlikely it is. 2514 */ 2515 __d_drop(dentry); 2516 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); 2517 2518 /* 2519 * Unhash the target (d_delete() is not usable here). If exchanging 2520 * the two dentries, then rehash onto the other's hash queue. 2521 */ 2522 __d_drop(target); 2523 if (exchange) { 2524 __d_rehash(target, 2525 d_hash(dentry->d_parent, dentry->d_name.hash)); 2526 } 2527 2528 /* Switch the names.. */ 2529 if (exchange) 2530 swap_names(dentry, target); 2531 else 2532 copy_name(dentry, target); 2533 2534 /* ...
and switch them in the tree */ 2535 if (IS_ROOT(dentry)) { 2536 /* splicing a tree */ 2537 dentry->d_parent = target->d_parent; 2538 target->d_parent = target; 2539 list_del_init(&target->d_u.d_child); 2540 list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2541 } else { 2542 /* swapping two dentries */ 2543 swap(dentry->d_parent, target->d_parent); 2544 list_move(&target->d_u.d_child, &target->d_parent->d_subdirs); 2545 list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); 2546 if (exchange) 2547 fsnotify_d_move(target); 2548 fsnotify_d_move(dentry); 2549 } 2550 2551 write_seqcount_end(&target->d_seq); 2552 write_seqcount_end(&dentry->d_seq); 2553 2554 dentry_unlock_for_move(dentry, target); 2555 } 2556 2557 /* 2558 * d_move - move a dentry 2559 * @dentry: entry to move 2560 * @target: new dentry 2561 * 2562 * Update the dcache to reflect the move of a file name. Negative 2563 * dcache entries should not be moved in this way. See the locking 2564 * requirements for __d_move. 2565 */ 2566 void d_move(struct dentry *dentry, struct dentry *target) 2567 { 2568 write_seqlock(&rename_lock); 2569 __d_move(dentry, target, false); 2570 write_sequnlock(&rename_lock); 2571 } 2572 EXPORT_SYMBOL(d_move); 2573 2574 /* 2575 * d_exchange - exchange two dentries 2576 * @dentry1: first dentry 2577 * @dentry2: second dentry 2578 */ 2579 void d_exchange(struct dentry *dentry1, struct dentry *dentry2) 2580 { 2581 write_seqlock(&rename_lock); 2582 2583 WARN_ON(!dentry1->d_inode); 2584 WARN_ON(!dentry2->d_inode); 2585 WARN_ON(IS_ROOT(dentry1)); 2586 WARN_ON(IS_ROOT(dentry2)); 2587 2588 __d_move(dentry1, dentry2, true); 2589 2590 write_sequnlock(&rename_lock); 2591 } 2592 2593 /** 2594 * d_ancestor - search for an ancestor 2595 * @p1: ancestor dentry 2596 * @p2: child dentry 2597 * 2598 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2599 * an ancestor of p2, else NULL. 2600 */ 2601 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2602 { 2603 struct dentry *p; 2604 2605 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2606 if (p->d_parent == p1) 2607 return p; 2608 } 2609 return NULL; 2610 } 2611 2612 /* 2613 * This helper attempts to cope with remotely renamed directories 2614 * 2615 * It assumes that the caller is already holding 2616 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2617 * 2618 * Note: If ever the locking in lock_rename() changes, then please 2619 * remember to update this too... 
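 *
 * The trylocks below mirror the ordering used by lock_rename(): the
 * per-sb s_vfs_rename_mutex is taken first, then the i_mutex of the
 * alias's parent. If either cannot be taken without blocking, we give
 * up and return -EBUSY rather than risk a deadlock.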
2620 */ 2621 static struct dentry *__d_unalias(struct inode *inode, 2622 struct dentry *dentry, struct dentry *alias) 2623 { 2624 struct mutex *m1 = NULL, *m2 = NULL; 2625 struct dentry *ret = ERR_PTR(-EBUSY); 2626 2627 /* If alias and dentry share a parent, then no extra locks required */ 2628 if (alias->d_parent == dentry->d_parent) 2629 goto out_unalias; 2630 2631 /* See lock_rename() */ 2632 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) 2633 goto out_err; 2634 m1 = &dentry->d_sb->s_vfs_rename_mutex; 2635 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) 2636 goto out_err; 2637 m2 = &alias->d_parent->d_inode->i_mutex; 2638 out_unalias: 2639 __d_move(alias, dentry, false); 2640 ret = alias; 2641 out_err: 2642 spin_unlock(&inode->i_lock); 2643 if (m2) 2644 mutex_unlock(m2); 2645 if (m1) 2646 mutex_unlock(m1); 2647 return ret; 2648 } 2649 2650 /** 2651 * d_splice_alias - splice a disconnected dentry into the tree if one exists 2652 * @inode: the inode which may have a disconnected dentry 2653 * @dentry: a negative dentry which we want to point to the inode. 2654 * 2655 * If inode is a directory and has an IS_ROOT alias, then d_move that in 2656 * place of the given dentry and return it, else simply d_add the inode 2657 * to the dentry and return NULL. 2658 * 2659 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and 2660 * we should error out: directories can't have multiple aliases. 2661 * 2662 * This is needed in the lookup routine of any filesystem that is exportable 2663 * (via knfsd) so that we can build dcache paths to directories effectively. 2664 * 2665 * If a dentry was found and moved, then it is returned. Otherwise NULL 2666 * is returned. This matches the expected return value of ->lookup. 2667 * 2668 * Cluster filesystems may call this function with a negative, hashed dentry. 2669 * In that case, we know that the inode will be a regular file, and also this 2670 * will only occur during atomic_open. So we need to check for the dentry 2671 * being already hashed only in the final case. 2672 */ 2673 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) 2674 { 2675 struct dentry *new = NULL; 2676 2677 if (IS_ERR(inode)) 2678 return ERR_CAST(inode); 2679 2680 if (inode && S_ISDIR(inode->i_mode)) { 2681 spin_lock(&inode->i_lock); 2682 new = __d_find_any_alias(inode); 2683 if (new) { 2684 if (!IS_ROOT(new)) { 2685 spin_unlock(&inode->i_lock); 2686 dput(new); 2687 return ERR_PTR(-EIO); 2688 } 2689 if (d_ancestor(new, dentry)) { 2690 spin_unlock(&inode->i_lock); 2691 dput(new); 2692 return ERR_PTR(-EIO); 2693 } 2694 write_seqlock(&rename_lock); 2695 __d_move(new, dentry, false); 2696 write_sequnlock(&rename_lock); 2697 spin_unlock(&inode->i_lock); 2698 security_d_instantiate(new, inode); 2699 iput(inode); 2700 } else { 2701 /* already taking inode->i_lock, so d_add() by hand */ 2702 __d_instantiate(dentry, inode); 2703 spin_unlock(&inode->i_lock); 2704 security_d_instantiate(dentry, inode); 2705 d_rehash(dentry); 2706 } 2707 } else { 2708 d_instantiate(dentry, inode); 2709 if (d_unhashed(dentry)) 2710 d_rehash(dentry); 2711 } 2712 return new; 2713 } 2714 EXPORT_SYMBOL(d_splice_alias); 2715 2716 /** 2717 * d_materialise_unique - introduce an inode into the tree 2718 * @dentry: candidate dentry 2719 * @inode: inode to bind to the dentry, to which aliases may be attached 2720 * 2721 * Introduces an dentry into the tree, substituting an extant disconnected 2722 * root directory alias in its place if there is one. 
Caller must hold the 2723 * i_mutex of the parent directory. 2724 */ 2725 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) 2726 { 2727 struct dentry *actual; 2728 2729 BUG_ON(!d_unhashed(dentry)); 2730 2731 if (!inode) { 2732 actual = dentry; 2733 __d_instantiate(dentry, NULL); 2734 d_rehash(actual); 2735 goto out_nolock; 2736 } 2737 2738 spin_lock(&inode->i_lock); 2739 2740 if (S_ISDIR(inode->i_mode)) { 2741 struct dentry *alias; 2742 2743 /* Does an aliased dentry already exist? */ 2744 alias = __d_find_alias(inode); 2745 if (alias) { 2746 actual = alias; 2747 write_seqlock(&rename_lock); 2748 2749 if (d_ancestor(alias, dentry)) { 2750 /* Check for loops */ 2751 actual = ERR_PTR(-ELOOP); 2752 spin_unlock(&inode->i_lock); 2753 } else if (IS_ROOT(alias)) { 2754 /* Is this an anonymous mountpoint that we 2755 * could splice into our tree? */ 2756 __d_move(alias, dentry, false); 2757 write_sequnlock(&rename_lock); 2758 goto found; 2759 } else { 2760 /* Nope, but we must(!) avoid directory 2761 * aliasing. This drops inode->i_lock */ 2762 actual = __d_unalias(inode, dentry, alias); 2763 } 2764 write_sequnlock(&rename_lock); 2765 if (IS_ERR(actual)) { 2766 if (PTR_ERR(actual) == -ELOOP) 2767 pr_warn_ratelimited( 2768 "VFS: Lookup of '%s' in %s %s" 2769 " would have caused loop\n", 2770 dentry->d_name.name, 2771 inode->i_sb->s_type->name, 2772 inode->i_sb->s_id); 2773 dput(alias); 2774 } 2775 goto out_nolock; 2776 } 2777 } 2778 2779 /* Add a unique reference */ 2780 actual = __d_instantiate_unique(dentry, inode); 2781 if (!actual) 2782 actual = dentry; 2783 2784 d_rehash(actual); 2785 found: 2786 spin_unlock(&inode->i_lock); 2787 out_nolock: 2788 if (actual == dentry) { 2789 security_d_instantiate(dentry, inode); 2790 return NULL; 2791 } 2792 2793 iput(inode); 2794 return actual; 2795 } 2796 EXPORT_SYMBOL_GPL(d_materialise_unique); 2797 2798 static int prepend(char **buffer, int *buflen, const char *str, int namelen) 2799 { 2800 *buflen -= namelen; 2801 if (*buflen < 0) 2802 return -ENAMETOOLONG; 2803 *buffer -= namelen; 2804 memcpy(*buffer, str, namelen); 2805 return 0; 2806 } 2807 2808 /** 2809 * prepend_name - prepend a pathname in front of current buffer pointer 2810 * @buffer: buffer pointer 2811 * @buflen: allocated length of the buffer 2812 * @name: name string and length qstr structure 2813 * 2814 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to 2815 * make sure that either the old or the new name pointer and length are 2816 * fetched. However, there may be mismatch between length and pointer. 2817 * The length cannot be trusted, we need to copy it byte-by-byte until 2818 * the length is reached or a null byte is found. It also prepends "/" at 2819 * the beginning of the name. The sequence number check at the caller will 2820 * retry it again when a d_move() does happen. So any garbage in the buffer 2821 * due to mismatched pointer and length will be discarded. 2822 * 2823 * Data dependency barrier is needed to make sure that we see that terminating 2824 * NUL. Alpha strikes again, film at 11... 
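 *
 * Illustrative example: with a buffer whose tail already holds "\0",
 * prepending the qstr "bar" and then the qstr "foo" leaves the tail of
 * the buffer as "/foo/bar\0", with *buffer pointing at the leading '/'.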
2825 */ 2826 static int prepend_name(char **buffer, int *buflen, struct qstr *name) 2827 { 2828 const char *dname = ACCESS_ONCE(name->name); 2829 u32 dlen = ACCESS_ONCE(name->len); 2830 char *p; 2831 2832 smp_read_barrier_depends(); 2833 2834 *buflen -= dlen + 1; 2835 if (*buflen < 0) 2836 return -ENAMETOOLONG; 2837 p = *buffer -= dlen + 1; 2838 *p++ = '/'; 2839 while (dlen--) { 2840 char c = *dname++; 2841 if (!c) 2842 break; 2843 *p++ = c; 2844 } 2845 return 0; 2846 } 2847 2848 /** 2849 * prepend_path - Prepend path string to a buffer 2850 * @path: the dentry/vfsmount to report 2851 * @root: root vfsmnt/dentry 2852 * @buffer: pointer to the end of the buffer 2853 * @buflen: pointer to buffer length 2854 * 2855 * The function will first try to write out the pathname without taking any 2856 * lock other than the RCU read lock to make sure that dentries won't go away. 2857 * It only checks the sequence number of the global rename_lock as any change 2858 * in the dentry's d_seq will be preceded by changes in the rename_lock 2859 * sequence number. If the sequence number had been changed, it will restart 2860 * the whole pathname back-tracing sequence again by taking the rename_lock. 2861 * In this case, there is no need to take the RCU read lock as the recursive 2862 * parent pointer references will keep the dentry chain alive as long as no 2863 * rename operation is performed. 2864 */ 2865 static int prepend_path(const struct path *path, 2866 const struct path *root, 2867 char **buffer, int *buflen) 2868 { 2869 struct dentry *dentry; 2870 struct vfsmount *vfsmnt; 2871 struct mount *mnt; 2872 int error = 0; 2873 unsigned seq, m_seq = 0; 2874 char *bptr; 2875 int blen; 2876 2877 rcu_read_lock(); 2878 restart_mnt: 2879 read_seqbegin_or_lock(&mount_lock, &m_seq); 2880 seq = 0; 2881 rcu_read_lock(); 2882 restart: 2883 bptr = *buffer; 2884 blen = *buflen; 2885 error = 0; 2886 dentry = path->dentry; 2887 vfsmnt = path->mnt; 2888 mnt = real_mount(vfsmnt); 2889 read_seqbegin_or_lock(&rename_lock, &seq); 2890 while (dentry != root->dentry || vfsmnt != root->mnt) { 2891 struct dentry * parent; 2892 2893 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 2894 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent); 2895 /* Global root? */ 2896 if (mnt != parent) { 2897 dentry = ACCESS_ONCE(mnt->mnt_mountpoint); 2898 mnt = parent; 2899 vfsmnt = &mnt->mnt; 2900 continue; 2901 } 2902 /* 2903 * Filesystems needing to implement special "root names" 2904 * should do so with ->d_dname() 2905 */ 2906 if (IS_ROOT(dentry) && 2907 (dentry->d_name.len != 1 || 2908 dentry->d_name.name[0] != '/')) { 2909 WARN(1, "Root dentry has weird name <%.*s>\n", 2910 (int) dentry->d_name.len, 2911 dentry->d_name.name); 2912 } 2913 if (!error) 2914 error = is_mounted(vfsmnt) ? 
1 : 2; 2915 break; 2916 } 2917 parent = dentry->d_parent; 2918 prefetch(parent); 2919 error = prepend_name(&bptr, &blen, &dentry->d_name); 2920 if (error) 2921 break; 2922 2923 dentry = parent; 2924 } 2925 if (!(seq & 1)) 2926 rcu_read_unlock(); 2927 if (need_seqretry(&rename_lock, seq)) { 2928 seq = 1; 2929 goto restart; 2930 } 2931 done_seqretry(&rename_lock, seq); 2932 2933 if (!(m_seq & 1)) 2934 rcu_read_unlock(); 2935 if (need_seqretry(&mount_lock, m_seq)) { 2936 m_seq = 1; 2937 goto restart_mnt; 2938 } 2939 done_seqretry(&mount_lock, m_seq); 2940 2941 if (error >= 0 && bptr == *buffer) { 2942 if (--blen < 0) 2943 error = -ENAMETOOLONG; 2944 else 2945 *--bptr = '/'; 2946 } 2947 *buffer = bptr; 2948 *buflen = blen; 2949 return error; 2950 } 2951 2952 /** 2953 * __d_path - return the path of a dentry 2954 * @path: the dentry/vfsmount to report 2955 * @root: root vfsmnt/dentry 2956 * @buf: buffer to return value in 2957 * @buflen: buffer length 2958 * 2959 * Convert a dentry into an ASCII path name. 2960 * 2961 * Returns a pointer into the buffer or an error code if the 2962 * path was too long. 2963 * 2964 * "buflen" should be positive. 2965 * 2966 * If the path is not reachable from the supplied root, return %NULL. 2967 */ 2968 char *__d_path(const struct path *path, 2969 const struct path *root, 2970 char *buf, int buflen) 2971 { 2972 char *res = buf + buflen; 2973 int error; 2974 2975 prepend(&res, &buflen, "\0", 1); 2976 error = prepend_path(path, root, &res, &buflen); 2977 2978 if (error < 0) 2979 return ERR_PTR(error); 2980 if (error > 0) 2981 return NULL; 2982 return res; 2983 } 2984 2985 char *d_absolute_path(const struct path *path, 2986 char *buf, int buflen) 2987 { 2988 struct path root = {}; 2989 char *res = buf + buflen; 2990 int error; 2991 2992 prepend(&res, &buflen, "\0", 1); 2993 error = prepend_path(path, &root, &res, &buflen); 2994 2995 if (error > 1) 2996 error = -EINVAL; 2997 if (error < 0) 2998 return ERR_PTR(error); 2999 return res; 3000 } 3001 3002 /* 3003 * same as __d_path but appends "(deleted)" for unlinked files. 3004 */ 3005 static int path_with_deleted(const struct path *path, 3006 const struct path *root, 3007 char **buf, int *buflen) 3008 { 3009 prepend(buf, buflen, "\0", 1); 3010 if (d_unlinked(path->dentry)) { 3011 int error = prepend(buf, buflen, " (deleted)", 10); 3012 if (error) 3013 return error; 3014 } 3015 3016 return prepend_path(path, root, buf, buflen); 3017 } 3018 3019 static int prepend_unreachable(char **buffer, int *buflen) 3020 { 3021 return prepend(buffer, buflen, "(unreachable)", 13); 3022 } 3023 3024 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) 3025 { 3026 unsigned seq; 3027 3028 do { 3029 seq = read_seqcount_begin(&fs->seq); 3030 *root = fs->root; 3031 } while (read_seqcount_retry(&fs->seq, seq)); 3032 } 3033 3034 /** 3035 * d_path - return the path of a dentry 3036 * @path: path to report 3037 * @buf: buffer to return value in 3038 * @buflen: buffer length 3039 * 3040 * Convert a dentry into an ASCII path name. If the entry has been deleted 3041 * the string " (deleted)" is appended. Note that this is ambiguous. 3042 * 3043 * Returns a pointer into the buffer or an error code if the path was 3044 * too long. Note: Callers should use the returned pointer, not the passed 3045 * in buffer, to use the name! The implementation often starts at an offset 3046 * into the buffer, and may leave 0 bytes at the start. 3047 * 3048 * "buflen" should be positive. 
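 *
 * A minimal usage sketch (illustrative only; error handling trimmed, and
 * "file" is an assumed struct file pointer):
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	char *p = d_path(&file->f_path, page, PAGE_SIZE);
 *	if (!IS_ERR(p))
 *		pr_info("%s\n", p);
 *	free_page((unsigned long)page);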
3049 */ 3050 char *d_path(const struct path *path, char *buf, int buflen) 3051 { 3052 char *res = buf + buflen; 3053 struct path root; 3054 int error; 3055 3056 /* 3057 * We have various synthetic filesystems that never get mounted. On 3058 * these filesystems dentries are never used for lookup purposes, and 3059 * thus don't need to be hashed. They also don't need a name until a 3060 * user wants to identify the object in /proc/pid/fd/. The little hack 3061 * below allows us to generate a name for these objects on demand: 3062 * 3063 * Some pseudo inodes are mountable. When they are mounted 3064 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname 3065 * and instead have d_path return the mounted path. 3066 */ 3067 if (path->dentry->d_op && path->dentry->d_op->d_dname && 3068 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) 3069 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 3070 3071 rcu_read_lock(); 3072 get_fs_root_rcu(current->fs, &root); 3073 error = path_with_deleted(path, &root, &res, &buflen); 3074 rcu_read_unlock(); 3075 3076 if (error < 0) 3077 res = ERR_PTR(error); 3078 return res; 3079 } 3080 EXPORT_SYMBOL(d_path); 3081 3082 /* 3083 * Helper function for dentry_operations.d_dname() members 3084 */ 3085 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 3086 const char *fmt, ...) 3087 { 3088 va_list args; 3089 char temp[64]; 3090 int sz; 3091 3092 va_start(args, fmt); 3093 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 3094 va_end(args); 3095 3096 if (sz > sizeof(temp) || sz > buflen) 3097 return ERR_PTR(-ENAMETOOLONG); 3098 3099 buffer += buflen - sz; 3100 return memcpy(buffer, temp, sz); 3101 } 3102 3103 char *simple_dname(struct dentry *dentry, char *buffer, int buflen) 3104 { 3105 char *end = buffer + buflen; 3106 /* these dentries are never renamed, so d_lock is not needed */ 3107 if (prepend(&end, &buflen, " (deleted)", 11) || 3108 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) || 3109 prepend(&end, &buflen, "/", 1)) 3110 end = ERR_PTR(-ENAMETOOLONG); 3111 return end; 3112 } 3113 EXPORT_SYMBOL(simple_dname); 3114 3115 /* 3116 * Write full pathname from the root of the filesystem into the buffer. 
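 * Unlike d_path(), this walks d_parent pointers only, so the result is
 * relative to the root of this dentry tree and takes no notice of the
 * mount tree; dentry_path() below additionally appends "//deleted" for
 * unlinked entries.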
3117 */ 3118 static char *__dentry_path(struct dentry *d, char *buf, int buflen) 3119 { 3120 struct dentry *dentry; 3121 char *end, *retval; 3122 int len, seq = 0; 3123 int error = 0; 3124 3125 if (buflen < 2) 3126 goto Elong; 3127 3128 rcu_read_lock(); 3129 restart: 3130 dentry = d; 3131 end = buf + buflen; 3132 len = buflen; 3133 prepend(&end, &len, "\0", 1); 3134 /* Get '/' right */ 3135 retval = end-1; 3136 *retval = '/'; 3137 read_seqbegin_or_lock(&rename_lock, &seq); 3138 while (!IS_ROOT(dentry)) { 3139 struct dentry *parent = dentry->d_parent; 3140 3141 prefetch(parent); 3142 error = prepend_name(&end, &len, &dentry->d_name); 3143 if (error) 3144 break; 3145 3146 retval = end; 3147 dentry = parent; 3148 } 3149 if (!(seq & 1)) 3150 rcu_read_unlock(); 3151 if (need_seqretry(&rename_lock, seq)) { 3152 seq = 1; 3153 goto restart; 3154 } 3155 done_seqretry(&rename_lock, seq); 3156 if (error) 3157 goto Elong; 3158 return retval; 3159 Elong: 3160 return ERR_PTR(-ENAMETOOLONG); 3161 } 3162 3163 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) 3164 { 3165 return __dentry_path(dentry, buf, buflen); 3166 } 3167 EXPORT_SYMBOL(dentry_path_raw); 3168 3169 char *dentry_path(struct dentry *dentry, char *buf, int buflen) 3170 { 3171 char *p = NULL; 3172 char *retval; 3173 3174 if (d_unlinked(dentry)) { 3175 p = buf + buflen; 3176 if (prepend(&p, &buflen, "//deleted", 10) != 0) 3177 goto Elong; 3178 buflen++; 3179 } 3180 retval = __dentry_path(dentry, buf, buflen); 3181 if (!IS_ERR(retval) && p) 3182 *p = '/'; /* restore '/' overriden with '\0' */ 3183 return retval; 3184 Elong: 3185 return ERR_PTR(-ENAMETOOLONG); 3186 } 3187 3188 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root, 3189 struct path *pwd) 3190 { 3191 unsigned seq; 3192 3193 do { 3194 seq = read_seqcount_begin(&fs->seq); 3195 *root = fs->root; 3196 *pwd = fs->pwd; 3197 } while (read_seqcount_retry(&fs->seq, seq)); 3198 } 3199 3200 /* 3201 * NOTE! The user-level library version returns a 3202 * character pointer. The kernel system call just 3203 * returns the length of the buffer filled (which 3204 * includes the ending '\0' character), or a negative 3205 * error value. 
So libc would do something like 3206 * 3207 * char *getcwd(char * buf, size_t size) 3208 * { 3209 * int retval; 3210 * 3211 * retval = sys_getcwd(buf, size); 3212 * if (retval >= 0) 3213 * return buf; 3214 * errno = -retval; 3215 * return NULL; 3216 * } 3217 */ 3218 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) 3219 { 3220 int error; 3221 struct path pwd, root; 3222 char *page = __getname(); 3223 3224 if (!page) 3225 return -ENOMEM; 3226 3227 rcu_read_lock(); 3228 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); 3229 3230 error = -ENOENT; 3231 if (!d_unlinked(pwd.dentry)) { 3232 unsigned long len; 3233 char *cwd = page + PATH_MAX; 3234 int buflen = PATH_MAX; 3235 3236 prepend(&cwd, &buflen, "\0", 1); 3237 error = prepend_path(&pwd, &root, &cwd, &buflen); 3238 rcu_read_unlock(); 3239 3240 if (error < 0) 3241 goto out; 3242 3243 /* Unreachable from current root */ 3244 if (error > 0) { 3245 error = prepend_unreachable(&cwd, &buflen); 3246 if (error) 3247 goto out; 3248 } 3249 3250 error = -ERANGE; 3251 len = PATH_MAX + page - cwd; 3252 if (len <= size) { 3253 error = len; 3254 if (copy_to_user(buf, cwd, len)) 3255 error = -EFAULT; 3256 } 3257 } else { 3258 rcu_read_unlock(); 3259 } 3260 3261 out: 3262 __putname(page); 3263 return error; 3264 } 3265 3266 /* 3267 * Test whether new_dentry is a subdirectory of old_dentry. 3268 * 3269 * Trivially implemented using the dcache structure 3270 */ 3271 3272 /** 3273 * is_subdir - is new dentry a subdirectory of old_dentry 3274 * @new_dentry: new dentry 3275 * @old_dentry: old dentry 3276 * 3277 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth). 3278 * Returns 0 otherwise. 3279 * Caller must ensure that "new_dentry" is pinned before calling is_subdir() 3280 */ 3281 3282 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 3283 { 3284 int result; 3285 unsigned seq; 3286 3287 if (new_dentry == old_dentry) 3288 return 1; 3289 3290 do { 3291 /* for restarting inner loop in case of seq retry */ 3292 seq = read_seqbegin(&rename_lock); 3293 /* 3294 * Need rcu_readlock to protect against the d_parent trashing 3295 * due to d_move 3296 */ 3297 rcu_read_lock(); 3298 if (d_ancestor(old_dentry, new_dentry)) 3299 result = 1; 3300 else 3301 result = 0; 3302 rcu_read_unlock(); 3303 } while (read_seqretry(&rename_lock, seq)); 3304 3305 return result; 3306 } 3307 3308 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) 3309 { 3310 struct dentry *root = data; 3311 if (dentry != root) { 3312 if (d_unhashed(dentry) || !dentry->d_inode) 3313 return D_WALK_SKIP; 3314 3315 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 3316 dentry->d_flags |= DCACHE_GENOCIDE; 3317 dentry->d_lockref.count--; 3318 } 3319 } 3320 return D_WALK_CONTINUE; 3321 } 3322 3323 void d_genocide(struct dentry *parent) 3324 { 3325 d_walk(parent, parent, d_genocide_kill, NULL); 3326 } 3327 3328 void d_tmpfile(struct dentry *dentry, struct inode *inode) 3329 { 3330 inode_dec_link_count(inode); 3331 BUG_ON(dentry->d_name.name != dentry->d_iname || 3332 !hlist_unhashed(&dentry->d_alias) || 3333 !d_unlinked(dentry)); 3334 spin_lock(&dentry->d_parent->d_lock); 3335 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 3336 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", 3337 (unsigned long long)inode->i_ino); 3338 spin_unlock(&dentry->d_lock); 3339 spin_unlock(&dentry->d_parent->d_lock); 3340 d_instantiate(dentry, inode); 3341 } 3342 EXPORT_SYMBOL(d_tmpfile); 3343 3344 static __initdata unsigned long dhash_entries; 3345 
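/*
 * "dhash_entries=" is a boot-time parameter; for example, booting with
 * dhash_entries=65536 asks for a dentry hash table sized for 65536
 * entries (alloc_large_system_hash() rounds the size to a power of two).
 * Leaving it at 0 lets the hash size be chosen automatically from the
 * amount of available memory.
 */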
static int __init set_dhash_entries(char *str) 3346 { 3347 if (!str) 3348 return 0; 3349 dhash_entries = simple_strtoul(str, &str, 0); 3350 return 1; 3351 } 3352 __setup("dhash_entries=", set_dhash_entries); 3353 3354 static void __init dcache_init_early(void) 3355 { 3356 unsigned int loop; 3357 3358 /* If hashes are distributed across NUMA nodes, defer 3359 * hash allocation until vmalloc space is available. 3360 */ 3361 if (hashdist) 3362 return; 3363 3364 dentry_hashtable = 3365 alloc_large_system_hash("Dentry cache", 3366 sizeof(struct hlist_bl_head), 3367 dhash_entries, 3368 13, 3369 HASH_EARLY, 3370 &d_hash_shift, 3371 &d_hash_mask, 3372 0, 3373 0); 3374 3375 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3376 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3377 } 3378 3379 static void __init dcache_init(void) 3380 { 3381 unsigned int loop; 3382 3383 /* 3384 * A constructor could be added for stable state like the lists, 3385 * but it is probably not worth it because of the cache nature 3386 * of the dcache. 3387 */ 3388 dentry_cache = KMEM_CACHE(dentry, 3389 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3390 3391 /* Hash may have been set up in dcache_init_early */ 3392 if (!hashdist) 3393 return; 3394 3395 dentry_hashtable = 3396 alloc_large_system_hash("Dentry cache", 3397 sizeof(struct hlist_bl_head), 3398 dhash_entries, 3399 13, 3400 0, 3401 &d_hash_shift, 3402 &d_hash_mask, 3403 0, 3404 0); 3405 3406 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3407 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3408 } 3409 3410 /* SLAB cache for __getname() consumers */ 3411 struct kmem_cache *names_cachep __read_mostly; 3412 EXPORT_SYMBOL(names_cachep); 3413 3414 EXPORT_SYMBOL(d_genocide); 3415 3416 void __init vfs_caches_init_early(void) 3417 { 3418 dcache_init_early(); 3419 inode_init_early(); 3420 } 3421 3422 void __init vfs_caches_init(unsigned long mempages) 3423 { 3424 unsigned long reserve; 3425 3426 /* Base hash sizes on available memory, with a reserve equal to 3427 150% of current kernel size */ 3428 3429 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3430 mempages -= reserve; 3431 3432 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3433 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3434 3435 dcache_init(); 3436 inode_init(); 3437 files_init(mempages); 3438 mnt_init(); 3439 bdev_cache_init(); 3440 chrdev_init(); 3441 } 3442