1 /* 2 * fs/dcache.c 3 * 4 * Complete reimplementation 5 * (C) 1997 Thomas Schoebel-Theuer, 6 * with heavy changes by Linus Torvalds 7 */ 8 9 /* 10 * Notes on the allocation strategy: 11 * 12 * The dcache is a master of the icache - whenever a dcache entry 13 * exists, the inode will always exist. "iput()" is done either when 14 * the dcache entry is deleted or garbage collected. 15 */ 16 17 #include <linux/syscalls.h> 18 #include <linux/string.h> 19 #include <linux/mm.h> 20 #include <linux/fs.h> 21 #include <linux/fsnotify.h> 22 #include <linux/slab.h> 23 #include <linux/init.h> 24 #include <linux/hash.h> 25 #include <linux/cache.h> 26 #include <linux/export.h> 27 #include <linux/mount.h> 28 #include <linux/file.h> 29 #include <asm/uaccess.h> 30 #include <linux/security.h> 31 #include <linux/seqlock.h> 32 #include <linux/swap.h> 33 #include <linux/bootmem.h> 34 #include <linux/fs_struct.h> 35 #include <linux/hardirq.h> 36 #include <linux/bit_spinlock.h> 37 #include <linux/rculist_bl.h> 38 #include <linux/prefetch.h> 39 #include <linux/ratelimit.h> 40 #include <linux/list_lru.h> 41 #include <linux/kasan.h> 42 43 #include "internal.h" 44 #include "mount.h" 45 46 /* 47 * Usage: 48 * dcache->d_inode->i_lock protects: 49 * - i_dentry, d_u.d_alias, d_inode of aliases 50 * dcache_hash_bucket lock protects: 51 * - the dcache hash table 52 * s_anon bl list spinlock protects: 53 * - the s_anon list (see __d_drop) 54 * dentry->d_sb->s_dentry_lru_lock protects: 55 * - the dcache lru lists and counters 56 * d_lock protects: 57 * - d_flags 58 * - d_name 59 * - d_lru 60 * - d_count 61 * - d_unhashed() 62 * - d_parent and d_subdirs 63 * - childrens' d_child and d_parent 64 * - d_u.d_alias, d_inode 65 * 66 * Ordering: 67 * dentry->d_inode->i_lock 68 * dentry->d_lock 69 * dentry->d_sb->s_dentry_lru_lock 70 * dcache_hash_bucket lock 71 * s_anon lock 72 * 73 * If there is an ancestor relationship: 74 * dentry->d_parent->...->d_parent->d_lock 75 * ... 76 * dentry->d_parent->d_lock 77 * dentry->d_lock 78 * 79 * If no ancestor relationship: 80 * if (dentry1 < dentry2) 81 * dentry1->d_lock 82 * dentry2->d_lock 83 */ 84 int sysctl_vfs_cache_pressure __read_mostly = 100; 85 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 86 87 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 88 89 EXPORT_SYMBOL(rename_lock); 90 91 static struct kmem_cache *dentry_cache __read_mostly; 92 93 /* 94 * This is the single most critical data structure when it comes 95 * to the dcache: the hashtable for lookups. Somebody should try 96 * to make this good - I've just made it work. 97 * 98 * This hash-function tries to avoid losing too many bits of hash 99 * information, yet avoid using a prime hash-size or similar. 100 */ 101 102 static unsigned int d_hash_mask __read_mostly; 103 static unsigned int d_hash_shift __read_mostly; 104 105 static struct hlist_bl_head *dentry_hashtable __read_mostly; 106 107 static inline struct hlist_bl_head *d_hash(const struct dentry *parent, 108 unsigned int hash) 109 { 110 hash += (unsigned long) parent / L1_CACHE_BYTES; 111 return dentry_hashtable + hash_32(hash, d_hash_shift); 112 } 113 114 /* Statistics gathering. */ 115 struct dentry_stat_t dentry_stat = { 116 .age_limit = 45, 117 }; 118 119 static DEFINE_PER_CPU(long, nr_dentry); 120 static DEFINE_PER_CPU(long, nr_dentry_unused); 121 122 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 123 124 /* 125 * Here we resort to our own counters instead of using generic per-cpu counters 126 * for consistency with what the vfs inode code does. We are expected to harvest 127 * better code and performance by having our own specialized counters. 128 * 129 * Please note that the loop is done over all possible CPUs, not over all online 130 * CPUs. The reason for this is that we don't want to play games with CPUs going 131 * on and off. If one of them goes off, we will just keep their counters. 132 * 133 * glommer: See cffbc8a for details, and if you ever intend to change this, 134 * please update all vfs counters to match. 135 */ 136 static long get_nr_dentry(void) 137 { 138 int i; 139 long sum = 0; 140 for_each_possible_cpu(i) 141 sum += per_cpu(nr_dentry, i); 142 return sum < 0 ? 0 : sum; 143 } 144 145 static long get_nr_dentry_unused(void) 146 { 147 int i; 148 long sum = 0; 149 for_each_possible_cpu(i) 150 sum += per_cpu(nr_dentry_unused, i); 151 return sum < 0 ? 0 : sum; 152 } 153 154 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, 155 size_t *lenp, loff_t *ppos) 156 { 157 dentry_stat.nr_dentry = get_nr_dentry(); 158 dentry_stat.nr_unused = get_nr_dentry_unused(); 159 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 160 } 161 #endif 162 163 /* 164 * Compare 2 name strings, return 0 if they match, otherwise non-zero. 165 * The strings are both count bytes long, and count is non-zero. 166 */ 167 #ifdef CONFIG_DCACHE_WORD_ACCESS 168 169 #include <asm/word-at-a-time.h> 170 /* 171 * NOTE! 'cs' and 'scount' come from a dentry, so it has a 172 * aligned allocation for this particular component. We don't 173 * strictly need the load_unaligned_zeropad() safety, but it 174 * doesn't hurt either. 175 * 176 * In contrast, 'ct' and 'tcount' can be from a pathname, and do 177 * need the careful unaligned handling. 178 */ 179 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 180 { 181 unsigned long a,b,mask; 182 183 for (;;) { 184 a = *(unsigned long *)cs; 185 b = load_unaligned_zeropad(ct); 186 if (tcount < sizeof(unsigned long)) 187 break; 188 if (unlikely(a != b)) 189 return 1; 190 cs += sizeof(unsigned long); 191 ct += sizeof(unsigned long); 192 tcount -= sizeof(unsigned long); 193 if (!tcount) 194 return 0; 195 } 196 mask = bytemask_from_count(tcount); 197 return unlikely(!!((a ^ b) & mask)); 198 } 199 200 #else 201 202 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount) 203 { 204 do { 205 if (*cs != *ct) 206 return 1; 207 cs++; 208 ct++; 209 tcount--; 210 } while (tcount); 211 return 0; 212 } 213 214 #endif 215 216 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount) 217 { 218 const unsigned char *cs; 219 /* 220 * Be careful about RCU walk racing with rename: 221 * use ACCESS_ONCE to fetch the name pointer. 222 * 223 * NOTE! Even if a rename will mean that the length 224 * was not loaded atomically, we don't care. The 225 * RCU walk will check the sequence count eventually, 226 * and catch it. And we won't overrun the buffer, 227 * because we're reading the name pointer atomically, 228 * and a dentry name is guaranteed to be properly 229 * terminated with a NUL byte. 230 * 231 * End result: even if 'len' is wrong, we'll exit 232 * early because the data cannot match (there can 233 * be no NUL in the ct/tcount data) 234 */ 235 cs = ACCESS_ONCE(dentry->d_name.name); 236 smp_read_barrier_depends(); 237 return dentry_string_cmp(cs, ct, tcount); 238 } 239 240 struct external_name { 241 union { 242 atomic_t count; 243 struct rcu_head head; 244 } u; 245 unsigned char name[]; 246 }; 247 248 static inline struct external_name *external_name(struct dentry *dentry) 249 { 250 return container_of(dentry->d_name.name, struct external_name, name[0]); 251 } 252 253 static void __d_free(struct rcu_head *head) 254 { 255 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 256 257 kmem_cache_free(dentry_cache, dentry); 258 } 259 260 static void __d_free_external(struct rcu_head *head) 261 { 262 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); 263 kfree(external_name(dentry)); 264 kmem_cache_free(dentry_cache, dentry); 265 } 266 267 static inline int dname_external(const struct dentry *dentry) 268 { 269 return dentry->d_name.name != dentry->d_iname; 270 } 271 272 static void dentry_free(struct dentry *dentry) 273 { 274 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); 275 if (unlikely(dname_external(dentry))) { 276 struct external_name *p = external_name(dentry); 277 if (likely(atomic_dec_and_test(&p->u.count))) { 278 call_rcu(&dentry->d_u.d_rcu, __d_free_external); 279 return; 280 } 281 } 282 /* if dentry was never visible to RCU, immediate free is OK */ 283 if (!(dentry->d_flags & DCACHE_RCUACCESS)) 284 __d_free(&dentry->d_u.d_rcu); 285 else 286 call_rcu(&dentry->d_u.d_rcu, __d_free); 287 } 288 289 /** 290 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups 291 * @dentry: the target dentry 292 * After this call, in-progress rcu-walk path lookup will fail. This 293 * should be called after unhashing, and after changing d_inode (if 294 * the dentry has not already been unhashed). 295 */ 296 static inline void dentry_rcuwalk_barrier(struct dentry *dentry) 297 { 298 assert_spin_locked(&dentry->d_lock); 299 /* Go through a barrier */ 300 write_seqcount_barrier(&dentry->d_seq); 301 } 302 303 /* 304 * Release the dentry's inode, using the filesystem 305 * d_iput() operation if defined. Dentry has no refcount 306 * and is unhashed. 307 */ 308 static void dentry_iput(struct dentry * dentry) 309 __releases(dentry->d_lock) 310 __releases(dentry->d_inode->i_lock) 311 { 312 struct inode *inode = dentry->d_inode; 313 if (inode) { 314 dentry->d_inode = NULL; 315 hlist_del_init(&dentry->d_u.d_alias); 316 spin_unlock(&dentry->d_lock); 317 spin_unlock(&inode->i_lock); 318 if (!inode->i_nlink) 319 fsnotify_inoderemove(inode); 320 if (dentry->d_op && dentry->d_op->d_iput) 321 dentry->d_op->d_iput(dentry, inode); 322 else 323 iput(inode); 324 } else { 325 spin_unlock(&dentry->d_lock); 326 } 327 } 328 329 /* 330 * Release the dentry's inode, using the filesystem 331 * d_iput() operation if defined. dentry remains in-use. 332 */ 333 static void dentry_unlink_inode(struct dentry * dentry) 334 __releases(dentry->d_lock) 335 __releases(dentry->d_inode->i_lock) 336 { 337 struct inode *inode = dentry->d_inode; 338 __d_clear_type(dentry); 339 dentry->d_inode = NULL; 340 hlist_del_init(&dentry->d_u.d_alias); 341 dentry_rcuwalk_barrier(dentry); 342 spin_unlock(&dentry->d_lock); 343 spin_unlock(&inode->i_lock); 344 if (!inode->i_nlink) 345 fsnotify_inoderemove(inode); 346 if (dentry->d_op && dentry->d_op->d_iput) 347 dentry->d_op->d_iput(dentry, inode); 348 else 349 iput(inode); 350 } 351 352 /* 353 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry 354 * is in use - which includes both the "real" per-superblock 355 * LRU list _and_ the DCACHE_SHRINK_LIST use. 356 * 357 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is 358 * on the shrink list (ie not on the superblock LRU list). 359 * 360 * The per-cpu "nr_dentry_unused" counters are updated with 361 * the DCACHE_LRU_LIST bit. 362 * 363 * These helper functions make sure we always follow the 364 * rules. d_lock must be held by the caller. 365 */ 366 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x)) 367 static void d_lru_add(struct dentry *dentry) 368 { 369 D_FLAG_VERIFY(dentry, 0); 370 dentry->d_flags |= DCACHE_LRU_LIST; 371 this_cpu_inc(nr_dentry_unused); 372 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 373 } 374 375 static void d_lru_del(struct dentry *dentry) 376 { 377 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 378 dentry->d_flags &= ~DCACHE_LRU_LIST; 379 this_cpu_dec(nr_dentry_unused); 380 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 381 } 382 383 static void d_shrink_del(struct dentry *dentry) 384 { 385 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); 386 list_del_init(&dentry->d_lru); 387 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); 388 this_cpu_dec(nr_dentry_unused); 389 } 390 391 static void d_shrink_add(struct dentry *dentry, struct list_head *list) 392 { 393 D_FLAG_VERIFY(dentry, 0); 394 list_add(&dentry->d_lru, list); 395 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; 396 this_cpu_inc(nr_dentry_unused); 397 } 398 399 /* 400 * These can only be called under the global LRU lock, ie during the 401 * callback for freeing the LRU list. "isolate" removes it from the 402 * LRU lists entirely, while shrink_move moves it to the indicated 403 * private list. 404 */ 405 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) 406 { 407 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 408 dentry->d_flags &= ~DCACHE_LRU_LIST; 409 this_cpu_dec(nr_dentry_unused); 410 list_lru_isolate(lru, &dentry->d_lru); 411 } 412 413 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, 414 struct list_head *list) 415 { 416 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 417 dentry->d_flags |= DCACHE_SHRINK_LIST; 418 list_lru_isolate_move(lru, &dentry->d_lru, list); 419 } 420 421 /* 422 * dentry_lru_(add|del)_list) must be called with d_lock held. 423 */ 424 static void dentry_lru_add(struct dentry *dentry) 425 { 426 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) 427 d_lru_add(dentry); 428 } 429 430 /** 431 * d_drop - drop a dentry 432 * @dentry: dentry to drop 433 * 434 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't 435 * be found through a VFS lookup any more. Note that this is different from 436 * deleting the dentry - d_delete will try to mark the dentry negative if 437 * possible, giving a successful _negative_ lookup, while d_drop will 438 * just make the cache lookup fail. 439 * 440 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some 441 * reason (NFS timeouts or autofs deletes). 442 * 443 * __d_drop requires dentry->d_lock. 444 */ 445 void __d_drop(struct dentry *dentry) 446 { 447 if (!d_unhashed(dentry)) { 448 struct hlist_bl_head *b; 449 /* 450 * Hashed dentries are normally on the dentry hashtable, 451 * with the exception of those newly allocated by 452 * d_obtain_alias, which are always IS_ROOT: 453 */ 454 if (unlikely(IS_ROOT(dentry))) 455 b = &dentry->d_sb->s_anon; 456 else 457 b = d_hash(dentry->d_parent, dentry->d_name.hash); 458 459 hlist_bl_lock(b); 460 __hlist_bl_del(&dentry->d_hash); 461 dentry->d_hash.pprev = NULL; 462 hlist_bl_unlock(b); 463 dentry_rcuwalk_barrier(dentry); 464 } 465 } 466 EXPORT_SYMBOL(__d_drop); 467 468 void d_drop(struct dentry *dentry) 469 { 470 spin_lock(&dentry->d_lock); 471 __d_drop(dentry); 472 spin_unlock(&dentry->d_lock); 473 } 474 EXPORT_SYMBOL(d_drop); 475 476 static void __dentry_kill(struct dentry *dentry) 477 { 478 struct dentry *parent = NULL; 479 bool can_free = true; 480 if (!IS_ROOT(dentry)) 481 parent = dentry->d_parent; 482 483 /* 484 * The dentry is now unrecoverably dead to the world. 485 */ 486 lockref_mark_dead(&dentry->d_lockref); 487 488 /* 489 * inform the fs via d_prune that this dentry is about to be 490 * unhashed and destroyed. 491 */ 492 if (dentry->d_flags & DCACHE_OP_PRUNE) 493 dentry->d_op->d_prune(dentry); 494 495 if (dentry->d_flags & DCACHE_LRU_LIST) { 496 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) 497 d_lru_del(dentry); 498 } 499 /* if it was on the hash then remove it */ 500 __d_drop(dentry); 501 __list_del_entry(&dentry->d_child); 502 /* 503 * Inform d_walk() that we are no longer attached to the 504 * dentry tree 505 */ 506 dentry->d_flags |= DCACHE_DENTRY_KILLED; 507 if (parent) 508 spin_unlock(&parent->d_lock); 509 dentry_iput(dentry); 510 /* 511 * dentry_iput drops the locks, at which point nobody (except 512 * transient RCU lookups) can reach this dentry. 513 */ 514 BUG_ON(dentry->d_lockref.count > 0); 515 this_cpu_dec(nr_dentry); 516 if (dentry->d_op && dentry->d_op->d_release) 517 dentry->d_op->d_release(dentry); 518 519 spin_lock(&dentry->d_lock); 520 if (dentry->d_flags & DCACHE_SHRINK_LIST) { 521 dentry->d_flags |= DCACHE_MAY_FREE; 522 can_free = false; 523 } 524 spin_unlock(&dentry->d_lock); 525 if (likely(can_free)) 526 dentry_free(dentry); 527 } 528 529 /* 530 * Finish off a dentry we've decided to kill. 531 * dentry->d_lock must be held, returns with it unlocked. 532 * If ref is non-zero, then decrement the refcount too. 533 * Returns dentry requiring refcount drop, or NULL if we're done. 534 */ 535 static struct dentry *dentry_kill(struct dentry *dentry) 536 __releases(dentry->d_lock) 537 { 538 struct inode *inode = dentry->d_inode; 539 struct dentry *parent = NULL; 540 541 if (inode && unlikely(!spin_trylock(&inode->i_lock))) 542 goto failed; 543 544 if (!IS_ROOT(dentry)) { 545 parent = dentry->d_parent; 546 if (unlikely(!spin_trylock(&parent->d_lock))) { 547 if (inode) 548 spin_unlock(&inode->i_lock); 549 goto failed; 550 } 551 } 552 553 __dentry_kill(dentry); 554 return parent; 555 556 failed: 557 spin_unlock(&dentry->d_lock); 558 cpu_relax(); 559 return dentry; /* try again with same dentry */ 560 } 561 562 static inline struct dentry *lock_parent(struct dentry *dentry) 563 { 564 struct dentry *parent = dentry->d_parent; 565 if (IS_ROOT(dentry)) 566 return NULL; 567 if (unlikely(dentry->d_lockref.count < 0)) 568 return NULL; 569 if (likely(spin_trylock(&parent->d_lock))) 570 return parent; 571 rcu_read_lock(); 572 spin_unlock(&dentry->d_lock); 573 again: 574 parent = ACCESS_ONCE(dentry->d_parent); 575 spin_lock(&parent->d_lock); 576 /* 577 * We can't blindly lock dentry until we are sure 578 * that we won't violate the locking order. 579 * Any changes of dentry->d_parent must have 580 * been done with parent->d_lock held, so 581 * spin_lock() above is enough of a barrier 582 * for checking if it's still our child. 583 */ 584 if (unlikely(parent != dentry->d_parent)) { 585 spin_unlock(&parent->d_lock); 586 goto again; 587 } 588 rcu_read_unlock(); 589 if (parent != dentry) 590 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 591 else 592 parent = NULL; 593 return parent; 594 } 595 596 /* 597 * Try to do a lockless dput(), and return whether that was successful. 598 * 599 * If unsuccessful, we return false, having already taken the dentry lock. 600 * 601 * The caller needs to hold the RCU read lock, so that the dentry is 602 * guaranteed to stay around even if the refcount goes down to zero! 603 */ 604 static inline bool fast_dput(struct dentry *dentry) 605 { 606 int ret; 607 unsigned int d_flags; 608 609 /* 610 * If we have a d_op->d_delete() operation, we sould not 611 * let the dentry count go to zero, so use "put__or_lock". 612 */ 613 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) 614 return lockref_put_or_lock(&dentry->d_lockref); 615 616 /* 617 * .. otherwise, we can try to just decrement the 618 * lockref optimistically. 619 */ 620 ret = lockref_put_return(&dentry->d_lockref); 621 622 /* 623 * If the lockref_put_return() failed due to the lock being held 624 * by somebody else, the fast path has failed. We will need to 625 * get the lock, and then check the count again. 626 */ 627 if (unlikely(ret < 0)) { 628 spin_lock(&dentry->d_lock); 629 if (dentry->d_lockref.count > 1) { 630 dentry->d_lockref.count--; 631 spin_unlock(&dentry->d_lock); 632 return 1; 633 } 634 return 0; 635 } 636 637 /* 638 * If we weren't the last ref, we're done. 639 */ 640 if (ret) 641 return 1; 642 643 /* 644 * Careful, careful. The reference count went down 645 * to zero, but we don't hold the dentry lock, so 646 * somebody else could get it again, and do another 647 * dput(), and we need to not race with that. 648 * 649 * However, there is a very special and common case 650 * where we don't care, because there is nothing to 651 * do: the dentry is still hashed, it does not have 652 * a 'delete' op, and it's referenced and already on 653 * the LRU list. 654 * 655 * NOTE! Since we aren't locked, these values are 656 * not "stable". However, it is sufficient that at 657 * some point after we dropped the reference the 658 * dentry was hashed and the flags had the proper 659 * value. Other dentry users may have re-gotten 660 * a reference to the dentry and change that, but 661 * our work is done - we can leave the dentry 662 * around with a zero refcount. 663 */ 664 smp_rmb(); 665 d_flags = ACCESS_ONCE(dentry->d_flags); 666 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST; 667 668 /* Nothing to do? Dropping the reference was all we needed? */ 669 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) 670 return 1; 671 672 /* 673 * Not the fast normal case? Get the lock. We've already decremented 674 * the refcount, but we'll need to re-check the situation after 675 * getting the lock. 676 */ 677 spin_lock(&dentry->d_lock); 678 679 /* 680 * Did somebody else grab a reference to it in the meantime, and 681 * we're no longer the last user after all? Alternatively, somebody 682 * else could have killed it and marked it dead. Either way, we 683 * don't need to do anything else. 684 */ 685 if (dentry->d_lockref.count) { 686 spin_unlock(&dentry->d_lock); 687 return 1; 688 } 689 690 /* 691 * Re-get the reference we optimistically dropped. We hold the 692 * lock, and we just tested that it was zero, so we can just 693 * set it to 1. 694 */ 695 dentry->d_lockref.count = 1; 696 return 0; 697 } 698 699 700 /* 701 * This is dput 702 * 703 * This is complicated by the fact that we do not want to put 704 * dentries that are no longer on any hash chain on the unused 705 * list: we'd much rather just get rid of them immediately. 706 * 707 * However, that implies that we have to traverse the dentry 708 * tree upwards to the parents which might _also_ now be 709 * scheduled for deletion (it may have been only waiting for 710 * its last child to go away). 711 * 712 * This tail recursion is done by hand as we don't want to depend 713 * on the compiler to always get this right (gcc generally doesn't). 714 * Real recursion would eat up our stack space. 715 */ 716 717 /* 718 * dput - release a dentry 719 * @dentry: dentry to release 720 * 721 * Release a dentry. This will drop the usage count and if appropriate 722 * call the dentry unlink method as well as removing it from the queues and 723 * releasing its resources. If the parent dentries were scheduled for release 724 * they too may now get deleted. 725 */ 726 void dput(struct dentry *dentry) 727 { 728 if (unlikely(!dentry)) 729 return; 730 731 repeat: 732 rcu_read_lock(); 733 if (likely(fast_dput(dentry))) { 734 rcu_read_unlock(); 735 return; 736 } 737 738 /* Slow case: now with the dentry lock held */ 739 rcu_read_unlock(); 740 741 /* Unreachable? Get rid of it */ 742 if (unlikely(d_unhashed(dentry))) 743 goto kill_it; 744 745 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { 746 if (dentry->d_op->d_delete(dentry)) 747 goto kill_it; 748 } 749 750 if (!(dentry->d_flags & DCACHE_REFERENCED)) 751 dentry->d_flags |= DCACHE_REFERENCED; 752 dentry_lru_add(dentry); 753 754 dentry->d_lockref.count--; 755 spin_unlock(&dentry->d_lock); 756 return; 757 758 kill_it: 759 dentry = dentry_kill(dentry); 760 if (dentry) 761 goto repeat; 762 } 763 EXPORT_SYMBOL(dput); 764 765 766 /* This must be called with d_lock held */ 767 static inline void __dget_dlock(struct dentry *dentry) 768 { 769 dentry->d_lockref.count++; 770 } 771 772 static inline void __dget(struct dentry *dentry) 773 { 774 lockref_get(&dentry->d_lockref); 775 } 776 777 struct dentry *dget_parent(struct dentry *dentry) 778 { 779 int gotref; 780 struct dentry *ret; 781 782 /* 783 * Do optimistic parent lookup without any 784 * locking. 785 */ 786 rcu_read_lock(); 787 ret = ACCESS_ONCE(dentry->d_parent); 788 gotref = lockref_get_not_zero(&ret->d_lockref); 789 rcu_read_unlock(); 790 if (likely(gotref)) { 791 if (likely(ret == ACCESS_ONCE(dentry->d_parent))) 792 return ret; 793 dput(ret); 794 } 795 796 repeat: 797 /* 798 * Don't need rcu_dereference because we re-check it was correct under 799 * the lock. 800 */ 801 rcu_read_lock(); 802 ret = dentry->d_parent; 803 spin_lock(&ret->d_lock); 804 if (unlikely(ret != dentry->d_parent)) { 805 spin_unlock(&ret->d_lock); 806 rcu_read_unlock(); 807 goto repeat; 808 } 809 rcu_read_unlock(); 810 BUG_ON(!ret->d_lockref.count); 811 ret->d_lockref.count++; 812 spin_unlock(&ret->d_lock); 813 return ret; 814 } 815 EXPORT_SYMBOL(dget_parent); 816 817 /** 818 * d_find_alias - grab a hashed alias of inode 819 * @inode: inode in question 820 * 821 * If inode has a hashed alias, or is a directory and has any alias, 822 * acquire the reference to alias and return it. Otherwise return NULL. 823 * Notice that if inode is a directory there can be only one alias and 824 * it can be unhashed only if it has no children, or if it is the root 825 * of a filesystem, or if the directory was renamed and d_revalidate 826 * was the first vfs operation to notice. 827 * 828 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer 829 * any other hashed alias over that one. 830 */ 831 static struct dentry *__d_find_alias(struct inode *inode) 832 { 833 struct dentry *alias, *discon_alias; 834 835 again: 836 discon_alias = NULL; 837 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { 838 spin_lock(&alias->d_lock); 839 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 840 if (IS_ROOT(alias) && 841 (alias->d_flags & DCACHE_DISCONNECTED)) { 842 discon_alias = alias; 843 } else { 844 __dget_dlock(alias); 845 spin_unlock(&alias->d_lock); 846 return alias; 847 } 848 } 849 spin_unlock(&alias->d_lock); 850 } 851 if (discon_alias) { 852 alias = discon_alias; 853 spin_lock(&alias->d_lock); 854 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 855 __dget_dlock(alias); 856 spin_unlock(&alias->d_lock); 857 return alias; 858 } 859 spin_unlock(&alias->d_lock); 860 goto again; 861 } 862 return NULL; 863 } 864 865 struct dentry *d_find_alias(struct inode *inode) 866 { 867 struct dentry *de = NULL; 868 869 if (!hlist_empty(&inode->i_dentry)) { 870 spin_lock(&inode->i_lock); 871 de = __d_find_alias(inode); 872 spin_unlock(&inode->i_lock); 873 } 874 return de; 875 } 876 EXPORT_SYMBOL(d_find_alias); 877 878 /* 879 * Try to kill dentries associated with this inode. 880 * WARNING: you must own a reference to inode. 881 */ 882 void d_prune_aliases(struct inode *inode) 883 { 884 struct dentry *dentry; 885 restart: 886 spin_lock(&inode->i_lock); 887 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { 888 spin_lock(&dentry->d_lock); 889 if (!dentry->d_lockref.count) { 890 struct dentry *parent = lock_parent(dentry); 891 if (likely(!dentry->d_lockref.count)) { 892 __dentry_kill(dentry); 893 dput(parent); 894 goto restart; 895 } 896 if (parent) 897 spin_unlock(&parent->d_lock); 898 } 899 spin_unlock(&dentry->d_lock); 900 } 901 spin_unlock(&inode->i_lock); 902 } 903 EXPORT_SYMBOL(d_prune_aliases); 904 905 static void shrink_dentry_list(struct list_head *list) 906 { 907 struct dentry *dentry, *parent; 908 909 while (!list_empty(list)) { 910 struct inode *inode; 911 dentry = list_entry(list->prev, struct dentry, d_lru); 912 spin_lock(&dentry->d_lock); 913 parent = lock_parent(dentry); 914 915 /* 916 * The dispose list is isolated and dentries are not accounted 917 * to the LRU here, so we can simply remove it from the list 918 * here regardless of whether it is referenced or not. 919 */ 920 d_shrink_del(dentry); 921 922 /* 923 * We found an inuse dentry which was not removed from 924 * the LRU because of laziness during lookup. Do not free it. 925 */ 926 if (dentry->d_lockref.count > 0) { 927 spin_unlock(&dentry->d_lock); 928 if (parent) 929 spin_unlock(&parent->d_lock); 930 continue; 931 } 932 933 934 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { 935 bool can_free = dentry->d_flags & DCACHE_MAY_FREE; 936 spin_unlock(&dentry->d_lock); 937 if (parent) 938 spin_unlock(&parent->d_lock); 939 if (can_free) 940 dentry_free(dentry); 941 continue; 942 } 943 944 inode = dentry->d_inode; 945 if (inode && unlikely(!spin_trylock(&inode->i_lock))) { 946 d_shrink_add(dentry, list); 947 spin_unlock(&dentry->d_lock); 948 if (parent) 949 spin_unlock(&parent->d_lock); 950 continue; 951 } 952 953 __dentry_kill(dentry); 954 955 /* 956 * We need to prune ancestors too. This is necessary to prevent 957 * quadratic behavior of shrink_dcache_parent(), but is also 958 * expected to be beneficial in reducing dentry cache 959 * fragmentation. 960 */ 961 dentry = parent; 962 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) { 963 parent = lock_parent(dentry); 964 if (dentry->d_lockref.count != 1) { 965 dentry->d_lockref.count--; 966 spin_unlock(&dentry->d_lock); 967 if (parent) 968 spin_unlock(&parent->d_lock); 969 break; 970 } 971 inode = dentry->d_inode; /* can't be NULL */ 972 if (unlikely(!spin_trylock(&inode->i_lock))) { 973 spin_unlock(&dentry->d_lock); 974 if (parent) 975 spin_unlock(&parent->d_lock); 976 cpu_relax(); 977 continue; 978 } 979 __dentry_kill(dentry); 980 dentry = parent; 981 } 982 } 983 } 984 985 static enum lru_status dentry_lru_isolate(struct list_head *item, 986 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 987 { 988 struct list_head *freeable = arg; 989 struct dentry *dentry = container_of(item, struct dentry, d_lru); 990 991 992 /* 993 * we are inverting the lru lock/dentry->d_lock here, 994 * so use a trylock. If we fail to get the lock, just skip 995 * it 996 */ 997 if (!spin_trylock(&dentry->d_lock)) 998 return LRU_SKIP; 999 1000 /* 1001 * Referenced dentries are still in use. If they have active 1002 * counts, just remove them from the LRU. Otherwise give them 1003 * another pass through the LRU. 1004 */ 1005 if (dentry->d_lockref.count) { 1006 d_lru_isolate(lru, dentry); 1007 spin_unlock(&dentry->d_lock); 1008 return LRU_REMOVED; 1009 } 1010 1011 if (dentry->d_flags & DCACHE_REFERENCED) { 1012 dentry->d_flags &= ~DCACHE_REFERENCED; 1013 spin_unlock(&dentry->d_lock); 1014 1015 /* 1016 * The list move itself will be made by the common LRU code. At 1017 * this point, we've dropped the dentry->d_lock but keep the 1018 * lru lock. This is safe to do, since every list movement is 1019 * protected by the lru lock even if both locks are held. 1020 * 1021 * This is guaranteed by the fact that all LRU management 1022 * functions are intermediated by the LRU API calls like 1023 * list_lru_add and list_lru_del. List movement in this file 1024 * only ever occur through this functions or through callbacks 1025 * like this one, that are called from the LRU API. 1026 * 1027 * The only exceptions to this are functions like 1028 * shrink_dentry_list, and code that first checks for the 1029 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be 1030 * operating only with stack provided lists after they are 1031 * properly isolated from the main list. It is thus, always a 1032 * local access. 1033 */ 1034 return LRU_ROTATE; 1035 } 1036 1037 d_lru_shrink_move(lru, dentry, freeable); 1038 spin_unlock(&dentry->d_lock); 1039 1040 return LRU_REMOVED; 1041 } 1042 1043 /** 1044 * prune_dcache_sb - shrink the dcache 1045 * @sb: superblock 1046 * @sc: shrink control, passed to list_lru_shrink_walk() 1047 * 1048 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This 1049 * is done when we need more memory and called from the superblock shrinker 1050 * function. 1051 * 1052 * This function may fail to free any resources if all the dentries are in 1053 * use. 1054 */ 1055 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc) 1056 { 1057 LIST_HEAD(dispose); 1058 long freed; 1059 1060 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, 1061 dentry_lru_isolate, &dispose); 1062 shrink_dentry_list(&dispose); 1063 return freed; 1064 } 1065 1066 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, 1067 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) 1068 { 1069 struct list_head *freeable = arg; 1070 struct dentry *dentry = container_of(item, struct dentry, d_lru); 1071 1072 /* 1073 * we are inverting the lru lock/dentry->d_lock here, 1074 * so use a trylock. If we fail to get the lock, just skip 1075 * it 1076 */ 1077 if (!spin_trylock(&dentry->d_lock)) 1078 return LRU_SKIP; 1079 1080 d_lru_shrink_move(lru, dentry, freeable); 1081 spin_unlock(&dentry->d_lock); 1082 1083 return LRU_REMOVED; 1084 } 1085 1086 1087 /** 1088 * shrink_dcache_sb - shrink dcache for a superblock 1089 * @sb: superblock 1090 * 1091 * Shrink the dcache for the specified super block. This is used to free 1092 * the dcache before unmounting a file system. 1093 */ 1094 void shrink_dcache_sb(struct super_block *sb) 1095 { 1096 long freed; 1097 1098 do { 1099 LIST_HEAD(dispose); 1100 1101 freed = list_lru_walk(&sb->s_dentry_lru, 1102 dentry_lru_isolate_shrink, &dispose, UINT_MAX); 1103 1104 this_cpu_sub(nr_dentry_unused, freed); 1105 shrink_dentry_list(&dispose); 1106 } while (freed > 0); 1107 } 1108 EXPORT_SYMBOL(shrink_dcache_sb); 1109 1110 /** 1111 * enum d_walk_ret - action to talke during tree walk 1112 * @D_WALK_CONTINUE: contrinue walk 1113 * @D_WALK_QUIT: quit walk 1114 * @D_WALK_NORETRY: quit when retry is needed 1115 * @D_WALK_SKIP: skip this dentry and its children 1116 */ 1117 enum d_walk_ret { 1118 D_WALK_CONTINUE, 1119 D_WALK_QUIT, 1120 D_WALK_NORETRY, 1121 D_WALK_SKIP, 1122 }; 1123 1124 /** 1125 * d_walk - walk the dentry tree 1126 * @parent: start of walk 1127 * @data: data passed to @enter() and @finish() 1128 * @enter: callback when first entering the dentry 1129 * @finish: callback when successfully finished the walk 1130 * 1131 * The @enter() and @finish() callbacks are called with d_lock held. 1132 */ 1133 static void d_walk(struct dentry *parent, void *data, 1134 enum d_walk_ret (*enter)(void *, struct dentry *), 1135 void (*finish)(void *)) 1136 { 1137 struct dentry *this_parent; 1138 struct list_head *next; 1139 unsigned seq = 0; 1140 enum d_walk_ret ret; 1141 bool retry = true; 1142 1143 again: 1144 read_seqbegin_or_lock(&rename_lock, &seq); 1145 this_parent = parent; 1146 spin_lock(&this_parent->d_lock); 1147 1148 ret = enter(data, this_parent); 1149 switch (ret) { 1150 case D_WALK_CONTINUE: 1151 break; 1152 case D_WALK_QUIT: 1153 case D_WALK_SKIP: 1154 goto out_unlock; 1155 case D_WALK_NORETRY: 1156 retry = false; 1157 break; 1158 } 1159 repeat: 1160 next = this_parent->d_subdirs.next; 1161 resume: 1162 while (next != &this_parent->d_subdirs) { 1163 struct list_head *tmp = next; 1164 struct dentry *dentry = list_entry(tmp, struct dentry, d_child); 1165 next = tmp->next; 1166 1167 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1168 1169 ret = enter(data, dentry); 1170 switch (ret) { 1171 case D_WALK_CONTINUE: 1172 break; 1173 case D_WALK_QUIT: 1174 spin_unlock(&dentry->d_lock); 1175 goto out_unlock; 1176 case D_WALK_NORETRY: 1177 retry = false; 1178 break; 1179 case D_WALK_SKIP: 1180 spin_unlock(&dentry->d_lock); 1181 continue; 1182 } 1183 1184 if (!list_empty(&dentry->d_subdirs)) { 1185 spin_unlock(&this_parent->d_lock); 1186 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1187 this_parent = dentry; 1188 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1189 goto repeat; 1190 } 1191 spin_unlock(&dentry->d_lock); 1192 } 1193 /* 1194 * All done at this level ... ascend and resume the search. 1195 */ 1196 rcu_read_lock(); 1197 ascend: 1198 if (this_parent != parent) { 1199 struct dentry *child = this_parent; 1200 this_parent = child->d_parent; 1201 1202 spin_unlock(&child->d_lock); 1203 spin_lock(&this_parent->d_lock); 1204 1205 /* might go back up the wrong parent if we have had a rename. */ 1206 if (need_seqretry(&rename_lock, seq)) 1207 goto rename_retry; 1208 next = child->d_child.next; 1209 while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { 1210 if (next == &this_parent->d_subdirs) 1211 goto ascend; 1212 child = list_entry(next, struct dentry, d_child); 1213 next = next->next; 1214 } 1215 rcu_read_unlock(); 1216 goto resume; 1217 } 1218 if (need_seqretry(&rename_lock, seq)) 1219 goto rename_retry; 1220 rcu_read_unlock(); 1221 if (finish) 1222 finish(data); 1223 1224 out_unlock: 1225 spin_unlock(&this_parent->d_lock); 1226 done_seqretry(&rename_lock, seq); 1227 return; 1228 1229 rename_retry: 1230 spin_unlock(&this_parent->d_lock); 1231 rcu_read_unlock(); 1232 BUG_ON(seq & 1); 1233 if (!retry) 1234 return; 1235 seq = 1; 1236 goto again; 1237 } 1238 1239 /* 1240 * Search for at least 1 mount point in the dentry's subdirs. 1241 * We descend to the next level whenever the d_subdirs 1242 * list is non-empty and continue searching. 1243 */ 1244 1245 static enum d_walk_ret check_mount(void *data, struct dentry *dentry) 1246 { 1247 int *ret = data; 1248 if (d_mountpoint(dentry)) { 1249 *ret = 1; 1250 return D_WALK_QUIT; 1251 } 1252 return D_WALK_CONTINUE; 1253 } 1254 1255 /** 1256 * have_submounts - check for mounts over a dentry 1257 * @parent: dentry to check. 1258 * 1259 * Return true if the parent or its subdirectories contain 1260 * a mount point 1261 */ 1262 int have_submounts(struct dentry *parent) 1263 { 1264 int ret = 0; 1265 1266 d_walk(parent, &ret, check_mount, NULL); 1267 1268 return ret; 1269 } 1270 EXPORT_SYMBOL(have_submounts); 1271 1272 /* 1273 * Called by mount code to set a mountpoint and check if the mountpoint is 1274 * reachable (e.g. NFS can unhash a directory dentry and then the complete 1275 * subtree can become unreachable). 1276 * 1277 * Only one of d_invalidate() and d_set_mounted() must succeed. For 1278 * this reason take rename_lock and d_lock on dentry and ancestors. 1279 */ 1280 int d_set_mounted(struct dentry *dentry) 1281 { 1282 struct dentry *p; 1283 int ret = -ENOENT; 1284 write_seqlock(&rename_lock); 1285 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { 1286 /* Need exclusion wrt. d_invalidate() */ 1287 spin_lock(&p->d_lock); 1288 if (unlikely(d_unhashed(p))) { 1289 spin_unlock(&p->d_lock); 1290 goto out; 1291 } 1292 spin_unlock(&p->d_lock); 1293 } 1294 spin_lock(&dentry->d_lock); 1295 if (!d_unlinked(dentry)) { 1296 dentry->d_flags |= DCACHE_MOUNTED; 1297 ret = 0; 1298 } 1299 spin_unlock(&dentry->d_lock); 1300 out: 1301 write_sequnlock(&rename_lock); 1302 return ret; 1303 } 1304 1305 /* 1306 * Search the dentry child list of the specified parent, 1307 * and move any unused dentries to the end of the unused 1308 * list for prune_dcache(). We descend to the next level 1309 * whenever the d_subdirs list is non-empty and continue 1310 * searching. 1311 * 1312 * It returns zero iff there are no unused children, 1313 * otherwise it returns the number of children moved to 1314 * the end of the unused list. This may not be the total 1315 * number of unused children, because select_parent can 1316 * drop the lock and return early due to latency 1317 * constraints. 1318 */ 1319 1320 struct select_data { 1321 struct dentry *start; 1322 struct list_head dispose; 1323 int found; 1324 }; 1325 1326 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) 1327 { 1328 struct select_data *data = _data; 1329 enum d_walk_ret ret = D_WALK_CONTINUE; 1330 1331 if (data->start == dentry) 1332 goto out; 1333 1334 if (dentry->d_flags & DCACHE_SHRINK_LIST) { 1335 data->found++; 1336 } else { 1337 if (dentry->d_flags & DCACHE_LRU_LIST) 1338 d_lru_del(dentry); 1339 if (!dentry->d_lockref.count) { 1340 d_shrink_add(dentry, &data->dispose); 1341 data->found++; 1342 } 1343 } 1344 /* 1345 * We can return to the caller if we have found some (this 1346 * ensures forward progress). We'll be coming back to find 1347 * the rest. 1348 */ 1349 if (!list_empty(&data->dispose)) 1350 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY; 1351 out: 1352 return ret; 1353 } 1354 1355 /** 1356 * shrink_dcache_parent - prune dcache 1357 * @parent: parent of entries to prune 1358 * 1359 * Prune the dcache to remove unused children of the parent dentry. 1360 */ 1361 void shrink_dcache_parent(struct dentry *parent) 1362 { 1363 for (;;) { 1364 struct select_data data; 1365 1366 INIT_LIST_HEAD(&data.dispose); 1367 data.start = parent; 1368 data.found = 0; 1369 1370 d_walk(parent, &data, select_collect, NULL); 1371 if (!data.found) 1372 break; 1373 1374 shrink_dentry_list(&data.dispose); 1375 cond_resched(); 1376 } 1377 } 1378 EXPORT_SYMBOL(shrink_dcache_parent); 1379 1380 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry) 1381 { 1382 /* it has busy descendents; complain about those instead */ 1383 if (!list_empty(&dentry->d_subdirs)) 1384 return D_WALK_CONTINUE; 1385 1386 /* root with refcount 1 is fine */ 1387 if (dentry == _data && dentry->d_lockref.count == 1) 1388 return D_WALK_CONTINUE; 1389 1390 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} " 1391 " still in use (%d) [unmount of %s %s]\n", 1392 dentry, 1393 dentry->d_inode ? 1394 dentry->d_inode->i_ino : 0UL, 1395 dentry, 1396 dentry->d_lockref.count, 1397 dentry->d_sb->s_type->name, 1398 dentry->d_sb->s_id); 1399 WARN_ON(1); 1400 return D_WALK_CONTINUE; 1401 } 1402 1403 static void do_one_tree(struct dentry *dentry) 1404 { 1405 shrink_dcache_parent(dentry); 1406 d_walk(dentry, dentry, umount_check, NULL); 1407 d_drop(dentry); 1408 dput(dentry); 1409 } 1410 1411 /* 1412 * destroy the dentries attached to a superblock on unmounting 1413 */ 1414 void shrink_dcache_for_umount(struct super_block *sb) 1415 { 1416 struct dentry *dentry; 1417 1418 WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked"); 1419 1420 dentry = sb->s_root; 1421 sb->s_root = NULL; 1422 do_one_tree(dentry); 1423 1424 while (!hlist_bl_empty(&sb->s_anon)) { 1425 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash)); 1426 do_one_tree(dentry); 1427 } 1428 } 1429 1430 struct detach_data { 1431 struct select_data select; 1432 struct dentry *mountpoint; 1433 }; 1434 static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry) 1435 { 1436 struct detach_data *data = _data; 1437 1438 if (d_mountpoint(dentry)) { 1439 __dget_dlock(dentry); 1440 data->mountpoint = dentry; 1441 return D_WALK_QUIT; 1442 } 1443 1444 return select_collect(&data->select, dentry); 1445 } 1446 1447 static void check_and_drop(void *_data) 1448 { 1449 struct detach_data *data = _data; 1450 1451 if (!data->mountpoint && !data->select.found) 1452 __d_drop(data->select.start); 1453 } 1454 1455 /** 1456 * d_invalidate - detach submounts, prune dcache, and drop 1457 * @dentry: dentry to invalidate (aka detach, prune and drop) 1458 * 1459 * no dcache lock. 1460 * 1461 * The final d_drop is done as an atomic operation relative to 1462 * rename_lock ensuring there are no races with d_set_mounted. This 1463 * ensures there are no unhashed dentries on the path to a mountpoint. 1464 */ 1465 void d_invalidate(struct dentry *dentry) 1466 { 1467 /* 1468 * If it's already been dropped, return OK. 1469 */ 1470 spin_lock(&dentry->d_lock); 1471 if (d_unhashed(dentry)) { 1472 spin_unlock(&dentry->d_lock); 1473 return; 1474 } 1475 spin_unlock(&dentry->d_lock); 1476 1477 /* Negative dentries can be dropped without further checks */ 1478 if (!dentry->d_inode) { 1479 d_drop(dentry); 1480 return; 1481 } 1482 1483 for (;;) { 1484 struct detach_data data; 1485 1486 data.mountpoint = NULL; 1487 INIT_LIST_HEAD(&data.select.dispose); 1488 data.select.start = dentry; 1489 data.select.found = 0; 1490 1491 d_walk(dentry, &data, detach_and_collect, check_and_drop); 1492 1493 if (data.select.found) 1494 shrink_dentry_list(&data.select.dispose); 1495 1496 if (data.mountpoint) { 1497 detach_mounts(data.mountpoint); 1498 dput(data.mountpoint); 1499 } 1500 1501 if (!data.mountpoint && !data.select.found) 1502 break; 1503 1504 cond_resched(); 1505 } 1506 } 1507 EXPORT_SYMBOL(d_invalidate); 1508 1509 /** 1510 * __d_alloc - allocate a dcache entry 1511 * @sb: filesystem it will belong to 1512 * @name: qstr of the name 1513 * 1514 * Allocates a dentry. It returns %NULL if there is insufficient memory 1515 * available. On a success the dentry is returned. The name passed in is 1516 * copied and the copy passed in may be reused after this call. 1517 */ 1518 1519 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) 1520 { 1521 struct dentry *dentry; 1522 char *dname; 1523 1524 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 1525 if (!dentry) 1526 return NULL; 1527 1528 /* 1529 * We guarantee that the inline name is always NUL-terminated. 1530 * This way the memcpy() done by the name switching in rename 1531 * will still always have a NUL at the end, even if we might 1532 * be overwriting an internal NUL character 1533 */ 1534 dentry->d_iname[DNAME_INLINE_LEN-1] = 0; 1535 if (name->len > DNAME_INLINE_LEN-1) { 1536 size_t size = offsetof(struct external_name, name[1]); 1537 struct external_name *p = kmalloc(size + name->len, GFP_KERNEL); 1538 if (!p) { 1539 kmem_cache_free(dentry_cache, dentry); 1540 return NULL; 1541 } 1542 atomic_set(&p->u.count, 1); 1543 dname = p->name; 1544 if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS)) 1545 kasan_unpoison_shadow(dname, 1546 round_up(name->len + 1, sizeof(unsigned long))); 1547 } else { 1548 dname = dentry->d_iname; 1549 } 1550 1551 dentry->d_name.len = name->len; 1552 dentry->d_name.hash = name->hash; 1553 memcpy(dname, name->name, name->len); 1554 dname[name->len] = 0; 1555 1556 /* Make sure we always see the terminating NUL character */ 1557 smp_wmb(); 1558 dentry->d_name.name = dname; 1559 1560 dentry->d_lockref.count = 1; 1561 dentry->d_flags = 0; 1562 spin_lock_init(&dentry->d_lock); 1563 seqcount_init(&dentry->d_seq); 1564 dentry->d_inode = NULL; 1565 dentry->d_parent = dentry; 1566 dentry->d_sb = sb; 1567 dentry->d_op = NULL; 1568 dentry->d_fsdata = NULL; 1569 INIT_HLIST_BL_NODE(&dentry->d_hash); 1570 INIT_LIST_HEAD(&dentry->d_lru); 1571 INIT_LIST_HEAD(&dentry->d_subdirs); 1572 INIT_HLIST_NODE(&dentry->d_u.d_alias); 1573 INIT_LIST_HEAD(&dentry->d_child); 1574 d_set_d_op(dentry, dentry->d_sb->s_d_op); 1575 1576 this_cpu_inc(nr_dentry); 1577 1578 return dentry; 1579 } 1580 1581 /** 1582 * d_alloc - allocate a dcache entry 1583 * @parent: parent of entry to allocate 1584 * @name: qstr of the name 1585 * 1586 * Allocates a dentry. It returns %NULL if there is insufficient memory 1587 * available. On a success the dentry is returned. The name passed in is 1588 * copied and the copy passed in may be reused after this call. 1589 */ 1590 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) 1591 { 1592 struct dentry *dentry = __d_alloc(parent->d_sb, name); 1593 if (!dentry) 1594 return NULL; 1595 1596 spin_lock(&parent->d_lock); 1597 /* 1598 * don't need child lock because it is not subject 1599 * to concurrency here 1600 */ 1601 __dget_dlock(parent); 1602 dentry->d_parent = parent; 1603 list_add(&dentry->d_child, &parent->d_subdirs); 1604 spin_unlock(&parent->d_lock); 1605 1606 return dentry; 1607 } 1608 EXPORT_SYMBOL(d_alloc); 1609 1610 /** 1611 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) 1612 * @sb: the superblock 1613 * @name: qstr of the name 1614 * 1615 * For a filesystem that just pins its dentries in memory and never 1616 * performs lookups at all, return an unhashed IS_ROOT dentry. 1617 */ 1618 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) 1619 { 1620 return __d_alloc(sb, name); 1621 } 1622 EXPORT_SYMBOL(d_alloc_pseudo); 1623 1624 struct dentry *d_alloc_name(struct dentry *parent, const char *name) 1625 { 1626 struct qstr q; 1627 1628 q.name = name; 1629 q.len = strlen(name); 1630 q.hash = full_name_hash(q.name, q.len); 1631 return d_alloc(parent, &q); 1632 } 1633 EXPORT_SYMBOL(d_alloc_name); 1634 1635 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) 1636 { 1637 WARN_ON_ONCE(dentry->d_op); 1638 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | 1639 DCACHE_OP_COMPARE | 1640 DCACHE_OP_REVALIDATE | 1641 DCACHE_OP_WEAK_REVALIDATE | 1642 DCACHE_OP_DELETE )); 1643 dentry->d_op = op; 1644 if (!op) 1645 return; 1646 if (op->d_hash) 1647 dentry->d_flags |= DCACHE_OP_HASH; 1648 if (op->d_compare) 1649 dentry->d_flags |= DCACHE_OP_COMPARE; 1650 if (op->d_revalidate) 1651 dentry->d_flags |= DCACHE_OP_REVALIDATE; 1652 if (op->d_weak_revalidate) 1653 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; 1654 if (op->d_delete) 1655 dentry->d_flags |= DCACHE_OP_DELETE; 1656 if (op->d_prune) 1657 dentry->d_flags |= DCACHE_OP_PRUNE; 1658 1659 } 1660 EXPORT_SYMBOL(d_set_d_op); 1661 1662 1663 /* 1664 * d_set_fallthru - Mark a dentry as falling through to a lower layer 1665 * @dentry - The dentry to mark 1666 * 1667 * Mark a dentry as falling through to the lower layer (as set with 1668 * d_pin_lower()). This flag may be recorded on the medium. 1669 */ 1670 void d_set_fallthru(struct dentry *dentry) 1671 { 1672 spin_lock(&dentry->d_lock); 1673 dentry->d_flags |= DCACHE_FALLTHRU; 1674 spin_unlock(&dentry->d_lock); 1675 } 1676 EXPORT_SYMBOL(d_set_fallthru); 1677 1678 static unsigned d_flags_for_inode(struct inode *inode) 1679 { 1680 unsigned add_flags = DCACHE_REGULAR_TYPE; 1681 1682 if (!inode) 1683 return DCACHE_MISS_TYPE; 1684 1685 if (S_ISDIR(inode->i_mode)) { 1686 add_flags = DCACHE_DIRECTORY_TYPE; 1687 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { 1688 if (unlikely(!inode->i_op->lookup)) 1689 add_flags = DCACHE_AUTODIR_TYPE; 1690 else 1691 inode->i_opflags |= IOP_LOOKUP; 1692 } 1693 goto type_determined; 1694 } 1695 1696 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { 1697 if (unlikely(inode->i_op->follow_link)) { 1698 add_flags = DCACHE_SYMLINK_TYPE; 1699 goto type_determined; 1700 } 1701 inode->i_opflags |= IOP_NOFOLLOW; 1702 } 1703 1704 if (unlikely(!S_ISREG(inode->i_mode))) 1705 add_flags = DCACHE_SPECIAL_TYPE; 1706 1707 type_determined: 1708 if (unlikely(IS_AUTOMOUNT(inode))) 1709 add_flags |= DCACHE_NEED_AUTOMOUNT; 1710 return add_flags; 1711 } 1712 1713 static void __d_instantiate(struct dentry *dentry, struct inode *inode) 1714 { 1715 unsigned add_flags = d_flags_for_inode(inode); 1716 1717 spin_lock(&dentry->d_lock); 1718 dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 1719 dentry->d_flags |= add_flags; 1720 if (inode) 1721 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1722 dentry->d_inode = inode; 1723 dentry_rcuwalk_barrier(dentry); 1724 spin_unlock(&dentry->d_lock); 1725 fsnotify_d_instantiate(dentry, inode); 1726 } 1727 1728 /** 1729 * d_instantiate - fill in inode information for a dentry 1730 * @entry: dentry to complete 1731 * @inode: inode to attach to this dentry 1732 * 1733 * Fill in inode information in the entry. 1734 * 1735 * This turns negative dentries into productive full members 1736 * of society. 1737 * 1738 * NOTE! This assumes that the inode count has been incremented 1739 * (or otherwise set) by the caller to indicate that it is now 1740 * in use by the dcache. 1741 */ 1742 1743 void d_instantiate(struct dentry *entry, struct inode * inode) 1744 { 1745 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); 1746 if (inode) 1747 spin_lock(&inode->i_lock); 1748 __d_instantiate(entry, inode); 1749 if (inode) 1750 spin_unlock(&inode->i_lock); 1751 security_d_instantiate(entry, inode); 1752 } 1753 EXPORT_SYMBOL(d_instantiate); 1754 1755 /** 1756 * d_instantiate_unique - instantiate a non-aliased dentry 1757 * @entry: dentry to instantiate 1758 * @inode: inode to attach to this dentry 1759 * 1760 * Fill in inode information in the entry. On success, it returns NULL. 1761 * If an unhashed alias of "entry" already exists, then we return the 1762 * aliased dentry instead and drop one reference to inode. 1763 * 1764 * Note that in order to avoid conflicts with rename() etc, the caller 1765 * had better be holding the parent directory semaphore. 1766 * 1767 * This also assumes that the inode count has been incremented 1768 * (or otherwise set) by the caller to indicate that it is now 1769 * in use by the dcache. 1770 */ 1771 static struct dentry *__d_instantiate_unique(struct dentry *entry, 1772 struct inode *inode) 1773 { 1774 struct dentry *alias; 1775 int len = entry->d_name.len; 1776 const char *name = entry->d_name.name; 1777 unsigned int hash = entry->d_name.hash; 1778 1779 if (!inode) { 1780 __d_instantiate(entry, NULL); 1781 return NULL; 1782 } 1783 1784 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { 1785 /* 1786 * Don't need alias->d_lock here, because aliases with 1787 * d_parent == entry->d_parent are not subject to name or 1788 * parent changes, because the parent inode i_mutex is held. 1789 */ 1790 if (alias->d_name.hash != hash) 1791 continue; 1792 if (alias->d_parent != entry->d_parent) 1793 continue; 1794 if (alias->d_name.len != len) 1795 continue; 1796 if (dentry_cmp(alias, name, len)) 1797 continue; 1798 __dget(alias); 1799 return alias; 1800 } 1801 1802 __d_instantiate(entry, inode); 1803 return NULL; 1804 } 1805 1806 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) 1807 { 1808 struct dentry *result; 1809 1810 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); 1811 1812 if (inode) 1813 spin_lock(&inode->i_lock); 1814 result = __d_instantiate_unique(entry, inode); 1815 if (inode) 1816 spin_unlock(&inode->i_lock); 1817 1818 if (!result) { 1819 security_d_instantiate(entry, inode); 1820 return NULL; 1821 } 1822 1823 BUG_ON(!d_unhashed(result)); 1824 iput(inode); 1825 return result; 1826 } 1827 1828 EXPORT_SYMBOL(d_instantiate_unique); 1829 1830 /** 1831 * d_instantiate_no_diralias - instantiate a non-aliased dentry 1832 * @entry: dentry to complete 1833 * @inode: inode to attach to this dentry 1834 * 1835 * Fill in inode information in the entry. If a directory alias is found, then 1836 * return an error (and drop inode). Together with d_materialise_unique() this 1837 * guarantees that a directory inode may never have more than one alias. 1838 */ 1839 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode) 1840 { 1841 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); 1842 1843 spin_lock(&inode->i_lock); 1844 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) { 1845 spin_unlock(&inode->i_lock); 1846 iput(inode); 1847 return -EBUSY; 1848 } 1849 __d_instantiate(entry, inode); 1850 spin_unlock(&inode->i_lock); 1851 security_d_instantiate(entry, inode); 1852 1853 return 0; 1854 } 1855 EXPORT_SYMBOL(d_instantiate_no_diralias); 1856 1857 struct dentry *d_make_root(struct inode *root_inode) 1858 { 1859 struct dentry *res = NULL; 1860 1861 if (root_inode) { 1862 static const struct qstr name = QSTR_INIT("/", 1); 1863 1864 res = __d_alloc(root_inode->i_sb, &name); 1865 if (res) 1866 d_instantiate(res, root_inode); 1867 else 1868 iput(root_inode); 1869 } 1870 return res; 1871 } 1872 EXPORT_SYMBOL(d_make_root); 1873 1874 static struct dentry * __d_find_any_alias(struct inode *inode) 1875 { 1876 struct dentry *alias; 1877 1878 if (hlist_empty(&inode->i_dentry)) 1879 return NULL; 1880 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); 1881 __dget(alias); 1882 return alias; 1883 } 1884 1885 /** 1886 * d_find_any_alias - find any alias for a given inode 1887 * @inode: inode to find an alias for 1888 * 1889 * If any aliases exist for the given inode, take and return a 1890 * reference for one of them. If no aliases exist, return %NULL. 1891 */ 1892 struct dentry *d_find_any_alias(struct inode *inode) 1893 { 1894 struct dentry *de; 1895 1896 spin_lock(&inode->i_lock); 1897 de = __d_find_any_alias(inode); 1898 spin_unlock(&inode->i_lock); 1899 return de; 1900 } 1901 EXPORT_SYMBOL(d_find_any_alias); 1902 1903 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) 1904 { 1905 static const struct qstr anonstring = QSTR_INIT("/", 1); 1906 struct dentry *tmp; 1907 struct dentry *res; 1908 unsigned add_flags; 1909 1910 if (!inode) 1911 return ERR_PTR(-ESTALE); 1912 if (IS_ERR(inode)) 1913 return ERR_CAST(inode); 1914 1915 res = d_find_any_alias(inode); 1916 if (res) 1917 goto out_iput; 1918 1919 tmp = __d_alloc(inode->i_sb, &anonstring); 1920 if (!tmp) { 1921 res = ERR_PTR(-ENOMEM); 1922 goto out_iput; 1923 } 1924 1925 spin_lock(&inode->i_lock); 1926 res = __d_find_any_alias(inode); 1927 if (res) { 1928 spin_unlock(&inode->i_lock); 1929 dput(tmp); 1930 goto out_iput; 1931 } 1932 1933 /* attach a disconnected dentry */ 1934 add_flags = d_flags_for_inode(inode); 1935 1936 if (disconnected) 1937 add_flags |= DCACHE_DISCONNECTED; 1938 1939 spin_lock(&tmp->d_lock); 1940 tmp->d_inode = inode; 1941 tmp->d_flags |= add_flags; 1942 hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry); 1943 hlist_bl_lock(&tmp->d_sb->s_anon); 1944 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); 1945 hlist_bl_unlock(&tmp->d_sb->s_anon); 1946 spin_unlock(&tmp->d_lock); 1947 spin_unlock(&inode->i_lock); 1948 security_d_instantiate(tmp, inode); 1949 1950 return tmp; 1951 1952 out_iput: 1953 if (res && !IS_ERR(res)) 1954 security_d_instantiate(res, inode); 1955 iput(inode); 1956 return res; 1957 } 1958 1959 /** 1960 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode 1961 * @inode: inode to allocate the dentry for 1962 * 1963 * Obtain a dentry for an inode resulting from NFS filehandle conversion or 1964 * similar open by handle operations. The returned dentry may be anonymous, 1965 * or may have a full name (if the inode was already in the cache). 1966 * 1967 * When called on a directory inode, we must ensure that the inode only ever 1968 * has one dentry. If a dentry is found, that is returned instead of 1969 * allocating a new one. 1970 * 1971 * On successful return, the reference to the inode has been transferred 1972 * to the dentry. In case of an error the reference on the inode is released. 1973 * To make it easier to use in export operations a %NULL or IS_ERR inode may 1974 * be passed in and the error will be propagated to the return value, 1975 * with a %NULL @inode replaced by ERR_PTR(-ESTALE). 1976 */ 1977 struct dentry *d_obtain_alias(struct inode *inode) 1978 { 1979 return __d_obtain_alias(inode, 1); 1980 } 1981 EXPORT_SYMBOL(d_obtain_alias); 1982 1983 /** 1984 * d_obtain_root - find or allocate a dentry for a given inode 1985 * @inode: inode to allocate the dentry for 1986 * 1987 * Obtain an IS_ROOT dentry for the root of a filesystem. 1988 * 1989 * We must ensure that directory inodes only ever have one dentry. If a 1990 * dentry is found, that is returned instead of allocating a new one. 1991 * 1992 * On successful return, the reference to the inode has been transferred 1993 * to the dentry. In case of an error the reference on the inode is 1994 * released. A %NULL or IS_ERR inode may be passed in and will be the 1995 * error will be propagate to the return value, with a %NULL @inode 1996 * replaced by ERR_PTR(-ESTALE). 1997 */ 1998 struct dentry *d_obtain_root(struct inode *inode) 1999 { 2000 return __d_obtain_alias(inode, 0); 2001 } 2002 EXPORT_SYMBOL(d_obtain_root); 2003 2004 /** 2005 * d_add_ci - lookup or allocate new dentry with case-exact name 2006 * @inode: the inode case-insensitive lookup has found 2007 * @dentry: the negative dentry that was passed to the parent's lookup func 2008 * @name: the case-exact name to be associated with the returned dentry 2009 * 2010 * This is to avoid filling the dcache with case-insensitive names to the 2011 * same inode, only the actual correct case is stored in the dcache for 2012 * case-insensitive filesystems. 2013 * 2014 * For a case-insensitive lookup match and if the the case-exact dentry 2015 * already exists in in the dcache, use it and return it. 2016 * 2017 * If no entry exists with the exact case name, allocate new dentry with 2018 * the exact case, and return the spliced entry. 2019 */ 2020 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, 2021 struct qstr *name) 2022 { 2023 struct dentry *found; 2024 struct dentry *new; 2025 2026 /* 2027 * First check if a dentry matching the name already exists, 2028 * if not go ahead and create it now. 2029 */ 2030 found = d_hash_and_lookup(dentry->d_parent, name); 2031 if (!found) { 2032 new = d_alloc(dentry->d_parent, name); 2033 if (!new) { 2034 found = ERR_PTR(-ENOMEM); 2035 } else { 2036 found = d_splice_alias(inode, new); 2037 if (found) { 2038 dput(new); 2039 return found; 2040 } 2041 return new; 2042 } 2043 } 2044 iput(inode); 2045 return found; 2046 } 2047 EXPORT_SYMBOL(d_add_ci); 2048 2049 /* 2050 * Do the slow-case of the dentry name compare. 2051 * 2052 * Unlike the dentry_cmp() function, we need to atomically 2053 * load the name and length information, so that the 2054 * filesystem can rely on them, and can use the 'name' and 2055 * 'len' information without worrying about walking off the 2056 * end of memory etc. 2057 * 2058 * Thus the read_seqcount_retry() and the "duplicate" info 2059 * in arguments (the low-level filesystem should not look 2060 * at the dentry inode or name contents directly, since 2061 * rename can change them while we're in RCU mode). 2062 */ 2063 enum slow_d_compare { 2064 D_COMP_OK, 2065 D_COMP_NOMATCH, 2066 D_COMP_SEQRETRY, 2067 }; 2068 2069 static noinline enum slow_d_compare slow_dentry_cmp( 2070 const struct dentry *parent, 2071 struct dentry *dentry, 2072 unsigned int seq, 2073 const struct qstr *name) 2074 { 2075 int tlen = dentry->d_name.len; 2076 const char *tname = dentry->d_name.name; 2077 2078 if (read_seqcount_retry(&dentry->d_seq, seq)) { 2079 cpu_relax(); 2080 return D_COMP_SEQRETRY; 2081 } 2082 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) 2083 return D_COMP_NOMATCH; 2084 return D_COMP_OK; 2085 } 2086 2087 /** 2088 * __d_lookup_rcu - search for a dentry (racy, store-free) 2089 * @parent: parent dentry 2090 * @name: qstr of name we wish to find 2091 * @seqp: returns d_seq value at the point where the dentry was found 2092 * Returns: dentry, or NULL 2093 * 2094 * __d_lookup_rcu is the dcache lookup function for rcu-walk name 2095 * resolution (store-free path walking) design described in 2096 * Documentation/filesystems/path-lookup.txt. 2097 * 2098 * This is not to be used outside core vfs. 2099 * 2100 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock 2101 * held, and rcu_read_lock held. The returned dentry must not be stored into 2102 * without taking d_lock and checking d_seq sequence count against @seq 2103 * returned here. 2104 * 2105 * A refcount may be taken on the found dentry with the d_rcu_to_refcount 2106 * function. 2107 * 2108 * Alternatively, __d_lookup_rcu may be called again to look up the child of 2109 * the returned dentry, so long as its parent's seqlock is checked after the 2110 * child is looked up. Thus, an interlocking stepping of sequence lock checks 2111 * is formed, giving integrity down the path walk. 2112 * 2113 * NOTE! The caller *has* to check the resulting dentry against the sequence 2114 * number we've returned before using any of the resulting dentry state! 2115 */ 2116 struct dentry *__d_lookup_rcu(const struct dentry *parent, 2117 const struct qstr *name, 2118 unsigned *seqp) 2119 { 2120 u64 hashlen = name->hash_len; 2121 const unsigned char *str = name->name; 2122 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen)); 2123 struct hlist_bl_node *node; 2124 struct dentry *dentry; 2125 2126 /* 2127 * Note: There is significant duplication with __d_lookup_rcu which is 2128 * required to prevent single threaded performance regressions 2129 * especially on architectures where smp_rmb (in seqcounts) are costly. 2130 * Keep the two functions in sync. 2131 */ 2132 2133 /* 2134 * The hash list is protected using RCU. 2135 * 2136 * Carefully use d_seq when comparing a candidate dentry, to avoid 2137 * races with d_move(). 2138 * 2139 * It is possible that concurrent renames can mess up our list 2140 * walk here and result in missing our dentry, resulting in the 2141 * false-negative result. d_lookup() protects against concurrent 2142 * renames using rename_lock seqlock. 2143 * 2144 * See Documentation/filesystems/path-lookup.txt for more details. 2145 */ 2146 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 2147 unsigned seq; 2148 2149 seqretry: 2150 /* 2151 * The dentry sequence count protects us from concurrent 2152 * renames, and thus protects parent and name fields. 2153 * 2154 * The caller must perform a seqcount check in order 2155 * to do anything useful with the returned dentry. 2156 * 2157 * NOTE! We do a "raw" seqcount_begin here. That means that 2158 * we don't wait for the sequence count to stabilize if it 2159 * is in the middle of a sequence change. If we do the slow 2160 * dentry compare, we will do seqretries until it is stable, 2161 * and if we end up with a successful lookup, we actually 2162 * want to exit RCU lookup anyway. 2163 */ 2164 seq = raw_seqcount_begin(&dentry->d_seq); 2165 if (dentry->d_parent != parent) 2166 continue; 2167 if (d_unhashed(dentry)) 2168 continue; 2169 2170 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) { 2171 if (dentry->d_name.hash != hashlen_hash(hashlen)) 2172 continue; 2173 *seqp = seq; 2174 switch (slow_dentry_cmp(parent, dentry, seq, name)) { 2175 case D_COMP_OK: 2176 return dentry; 2177 case D_COMP_NOMATCH: 2178 continue; 2179 default: 2180 goto seqretry; 2181 } 2182 } 2183 2184 if (dentry->d_name.hash_len != hashlen) 2185 continue; 2186 *seqp = seq; 2187 if (!dentry_cmp(dentry, str, hashlen_len(hashlen))) 2188 return dentry; 2189 } 2190 return NULL; 2191 } 2192 2193 /** 2194 * d_lookup - search for a dentry 2195 * @parent: parent dentry 2196 * @name: qstr of name we wish to find 2197 * Returns: dentry, or NULL 2198 * 2199 * d_lookup searches the children of the parent dentry for the name in 2200 * question. If the dentry is found its reference count is incremented and the 2201 * dentry is returned. The caller must use dput to free the entry when it has 2202 * finished using it. %NULL is returned if the dentry does not exist. 2203 */ 2204 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) 2205 { 2206 struct dentry *dentry; 2207 unsigned seq; 2208 2209 do { 2210 seq = read_seqbegin(&rename_lock); 2211 dentry = __d_lookup(parent, name); 2212 if (dentry) 2213 break; 2214 } while (read_seqretry(&rename_lock, seq)); 2215 return dentry; 2216 } 2217 EXPORT_SYMBOL(d_lookup); 2218 2219 /** 2220 * __d_lookup - search for a dentry (racy) 2221 * @parent: parent dentry 2222 * @name: qstr of name we wish to find 2223 * Returns: dentry, or NULL 2224 * 2225 * __d_lookup is like d_lookup, however it may (rarely) return a 2226 * false-negative result due to unrelated rename activity. 2227 * 2228 * __d_lookup is slightly faster by avoiding rename_lock read seqlock, 2229 * however it must be used carefully, eg. with a following d_lookup in 2230 * the case of failure. 2231 * 2232 * __d_lookup callers must be commented. 2233 */ 2234 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) 2235 { 2236 unsigned int len = name->len; 2237 unsigned int hash = name->hash; 2238 const unsigned char *str = name->name; 2239 struct hlist_bl_head *b = d_hash(parent, hash); 2240 struct hlist_bl_node *node; 2241 struct dentry *found = NULL; 2242 struct dentry *dentry; 2243 2244 /* 2245 * Note: There is significant duplication with __d_lookup_rcu which is 2246 * required to prevent single threaded performance regressions 2247 * especially on architectures where smp_rmb (in seqcounts) are costly. 2248 * Keep the two functions in sync. 2249 */ 2250 2251 /* 2252 * The hash list is protected using RCU. 2253 * 2254 * Take d_lock when comparing a candidate dentry, to avoid races 2255 * with d_move(). 2256 * 2257 * It is possible that concurrent renames can mess up our list 2258 * walk here and result in missing our dentry, resulting in the 2259 * false-negative result. d_lookup() protects against concurrent 2260 * renames using rename_lock seqlock. 2261 * 2262 * See Documentation/filesystems/path-lookup.txt for more details. 2263 */ 2264 rcu_read_lock(); 2265 2266 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) { 2267 2268 if (dentry->d_name.hash != hash) 2269 continue; 2270 2271 spin_lock(&dentry->d_lock); 2272 if (dentry->d_parent != parent) 2273 goto next; 2274 if (d_unhashed(dentry)) 2275 goto next; 2276 2277 /* 2278 * It is safe to compare names since d_move() cannot 2279 * change the qstr (protected by d_lock). 2280 */ 2281 if (parent->d_flags & DCACHE_OP_COMPARE) { 2282 int tlen = dentry->d_name.len; 2283 const char *tname = dentry->d_name.name; 2284 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) 2285 goto next; 2286 } else { 2287 if (dentry->d_name.len != len) 2288 goto next; 2289 if (dentry_cmp(dentry, str, len)) 2290 goto next; 2291 } 2292 2293 dentry->d_lockref.count++; 2294 found = dentry; 2295 spin_unlock(&dentry->d_lock); 2296 break; 2297 next: 2298 spin_unlock(&dentry->d_lock); 2299 } 2300 rcu_read_unlock(); 2301 2302 return found; 2303 } 2304 2305 /** 2306 * d_hash_and_lookup - hash the qstr then search for a dentry 2307 * @dir: Directory to search in 2308 * @name: qstr of name we wish to find 2309 * 2310 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error) 2311 */ 2312 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) 2313 { 2314 /* 2315 * Check for a fs-specific hash function. Note that we must 2316 * calculate the standard hash first, as the d_op->d_hash() 2317 * routine may choose to leave the hash value unchanged. 2318 */ 2319 name->hash = full_name_hash(name->name, name->len); 2320 if (dir->d_flags & DCACHE_OP_HASH) { 2321 int err = dir->d_op->d_hash(dir, name); 2322 if (unlikely(err < 0)) 2323 return ERR_PTR(err); 2324 } 2325 return d_lookup(dir, name); 2326 } 2327 EXPORT_SYMBOL(d_hash_and_lookup); 2328 2329 /* 2330 * When a file is deleted, we have two options: 2331 * - turn this dentry into a negative dentry 2332 * - unhash this dentry and free it. 2333 * 2334 * Usually, we want to just turn this into 2335 * a negative dentry, but if anybody else is 2336 * currently using the dentry or the inode 2337 * we can't do that and we fall back on removing 2338 * it from the hash queues and waiting for 2339 * it to be deleted later when it has no users 2340 */ 2341 2342 /** 2343 * d_delete - delete a dentry 2344 * @dentry: The dentry to delete 2345 * 2346 * Turn the dentry into a negative dentry if possible, otherwise 2347 * remove it from the hash queues so it can be deleted later 2348 */ 2349 2350 void d_delete(struct dentry * dentry) 2351 { 2352 struct inode *inode; 2353 int isdir = 0; 2354 /* 2355 * Are we the only user? 2356 */ 2357 again: 2358 spin_lock(&dentry->d_lock); 2359 inode = dentry->d_inode; 2360 isdir = S_ISDIR(inode->i_mode); 2361 if (dentry->d_lockref.count == 1) { 2362 if (!spin_trylock(&inode->i_lock)) { 2363 spin_unlock(&dentry->d_lock); 2364 cpu_relax(); 2365 goto again; 2366 } 2367 dentry->d_flags &= ~DCACHE_CANT_MOUNT; 2368 dentry_unlink_inode(dentry); 2369 fsnotify_nameremove(dentry, isdir); 2370 return; 2371 } 2372 2373 if (!d_unhashed(dentry)) 2374 __d_drop(dentry); 2375 2376 spin_unlock(&dentry->d_lock); 2377 2378 fsnotify_nameremove(dentry, isdir); 2379 } 2380 EXPORT_SYMBOL(d_delete); 2381 2382 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) 2383 { 2384 BUG_ON(!d_unhashed(entry)); 2385 hlist_bl_lock(b); 2386 entry->d_flags |= DCACHE_RCUACCESS; 2387 hlist_bl_add_head_rcu(&entry->d_hash, b); 2388 hlist_bl_unlock(b); 2389 } 2390 2391 static void _d_rehash(struct dentry * entry) 2392 { 2393 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash)); 2394 } 2395 2396 /** 2397 * d_rehash - add an entry back to the hash 2398 * @entry: dentry to add to the hash 2399 * 2400 * Adds a dentry to the hash according to its name. 2401 */ 2402 2403 void d_rehash(struct dentry * entry) 2404 { 2405 spin_lock(&entry->d_lock); 2406 _d_rehash(entry); 2407 spin_unlock(&entry->d_lock); 2408 } 2409 EXPORT_SYMBOL(d_rehash); 2410 2411 /** 2412 * dentry_update_name_case - update case insensitive dentry with a new name 2413 * @dentry: dentry to be updated 2414 * @name: new name 2415 * 2416 * Update a case insensitive dentry with new case of name. 2417 * 2418 * dentry must have been returned by d_lookup with name @name. Old and new 2419 * name lengths must match (ie. no d_compare which allows mismatched name 2420 * lengths). 2421 * 2422 * Parent inode i_mutex must be held over d_lookup and into this call (to 2423 * keep renames and concurrent inserts, and readdir(2) away). 2424 */ 2425 void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2426 { 2427 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); 2428 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2429 2430 spin_lock(&dentry->d_lock); 2431 write_seqcount_begin(&dentry->d_seq); 2432 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); 2433 write_seqcount_end(&dentry->d_seq); 2434 spin_unlock(&dentry->d_lock); 2435 } 2436 EXPORT_SYMBOL(dentry_update_name_case); 2437 2438 static void swap_names(struct dentry *dentry, struct dentry *target) 2439 { 2440 if (unlikely(dname_external(target))) { 2441 if (unlikely(dname_external(dentry))) { 2442 /* 2443 * Both external: swap the pointers 2444 */ 2445 swap(target->d_name.name, dentry->d_name.name); 2446 } else { 2447 /* 2448 * dentry:internal, target:external. Steal target's 2449 * storage and make target internal. 2450 */ 2451 memcpy(target->d_iname, dentry->d_name.name, 2452 dentry->d_name.len + 1); 2453 dentry->d_name.name = target->d_name.name; 2454 target->d_name.name = target->d_iname; 2455 } 2456 } else { 2457 if (unlikely(dname_external(dentry))) { 2458 /* 2459 * dentry:external, target:internal. Give dentry's 2460 * storage to target and make dentry internal 2461 */ 2462 memcpy(dentry->d_iname, target->d_name.name, 2463 target->d_name.len + 1); 2464 target->d_name.name = dentry->d_name.name; 2465 dentry->d_name.name = dentry->d_iname; 2466 } else { 2467 /* 2468 * Both are internal. 2469 */ 2470 unsigned int i; 2471 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); 2472 kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN); 2473 kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN); 2474 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { 2475 swap(((long *) &dentry->d_iname)[i], 2476 ((long *) &target->d_iname)[i]); 2477 } 2478 } 2479 } 2480 swap(dentry->d_name.hash_len, target->d_name.hash_len); 2481 } 2482 2483 static void copy_name(struct dentry *dentry, struct dentry *target) 2484 { 2485 struct external_name *old_name = NULL; 2486 if (unlikely(dname_external(dentry))) 2487 old_name = external_name(dentry); 2488 if (unlikely(dname_external(target))) { 2489 atomic_inc(&external_name(target)->u.count); 2490 dentry->d_name = target->d_name; 2491 } else { 2492 memcpy(dentry->d_iname, target->d_name.name, 2493 target->d_name.len + 1); 2494 dentry->d_name.name = dentry->d_iname; 2495 dentry->d_name.hash_len = target->d_name.hash_len; 2496 } 2497 if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) 2498 kfree_rcu(old_name, u.head); 2499 } 2500 2501 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) 2502 { 2503 /* 2504 * XXXX: do we really need to take target->d_lock? 2505 */ 2506 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) 2507 spin_lock(&target->d_parent->d_lock); 2508 else { 2509 if (d_ancestor(dentry->d_parent, target->d_parent)) { 2510 spin_lock(&dentry->d_parent->d_lock); 2511 spin_lock_nested(&target->d_parent->d_lock, 2512 DENTRY_D_LOCK_NESTED); 2513 } else { 2514 spin_lock(&target->d_parent->d_lock); 2515 spin_lock_nested(&dentry->d_parent->d_lock, 2516 DENTRY_D_LOCK_NESTED); 2517 } 2518 } 2519 if (target < dentry) { 2520 spin_lock_nested(&target->d_lock, 2); 2521 spin_lock_nested(&dentry->d_lock, 3); 2522 } else { 2523 spin_lock_nested(&dentry->d_lock, 2); 2524 spin_lock_nested(&target->d_lock, 3); 2525 } 2526 } 2527 2528 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target) 2529 { 2530 if (target->d_parent != dentry->d_parent) 2531 spin_unlock(&dentry->d_parent->d_lock); 2532 if (target->d_parent != target) 2533 spin_unlock(&target->d_parent->d_lock); 2534 spin_unlock(&target->d_lock); 2535 spin_unlock(&dentry->d_lock); 2536 } 2537 2538 /* 2539 * When switching names, the actual string doesn't strictly have to 2540 * be preserved in the target - because we're dropping the target 2541 * anyway. As such, we can just do a simple memcpy() to copy over 2542 * the new name before we switch, unless we are going to rehash 2543 * it. Note that if we *do* unhash the target, we are not allowed 2544 * to rehash it without giving it a new name/hash key - whether 2545 * we swap or overwrite the names here, resulting name won't match 2546 * the reality in filesystem; it's only there for d_path() purposes. 2547 * Note that all of this is happening under rename_lock, so the 2548 * any hash lookup seeing it in the middle of manipulations will 2549 * be discarded anyway. So we do not care what happens to the hash 2550 * key in that case. 2551 */ 2552 /* 2553 * __d_move - move a dentry 2554 * @dentry: entry to move 2555 * @target: new dentry 2556 * @exchange: exchange the two dentries 2557 * 2558 * Update the dcache to reflect the move of a file name. Negative 2559 * dcache entries should not be moved in this way. Caller must hold 2560 * rename_lock, the i_mutex of the source and target directories, 2561 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename(). 2562 */ 2563 static void __d_move(struct dentry *dentry, struct dentry *target, 2564 bool exchange) 2565 { 2566 if (!dentry->d_inode) 2567 printk(KERN_WARNING "VFS: moving negative dcache entry\n"); 2568 2569 BUG_ON(d_ancestor(dentry, target)); 2570 BUG_ON(d_ancestor(target, dentry)); 2571 2572 dentry_lock_for_move(dentry, target); 2573 2574 write_seqcount_begin(&dentry->d_seq); 2575 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); 2576 2577 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ 2578 2579 /* 2580 * Move the dentry to the target hash queue. Don't bother checking 2581 * for the same hash queue because of how unlikely it is. 2582 */ 2583 __d_drop(dentry); 2584 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash)); 2585 2586 /* 2587 * Unhash the target (d_delete() is not usable here). If exchanging 2588 * the two dentries, then rehash onto the other's hash queue. 2589 */ 2590 __d_drop(target); 2591 if (exchange) { 2592 __d_rehash(target, 2593 d_hash(dentry->d_parent, dentry->d_name.hash)); 2594 } 2595 2596 /* Switch the names.. */ 2597 if (exchange) 2598 swap_names(dentry, target); 2599 else 2600 copy_name(dentry, target); 2601 2602 /* ... and switch them in the tree */ 2603 if (IS_ROOT(dentry)) { 2604 /* splicing a tree */ 2605 dentry->d_parent = target->d_parent; 2606 target->d_parent = target; 2607 list_del_init(&target->d_child); 2608 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs); 2609 } else { 2610 /* swapping two dentries */ 2611 swap(dentry->d_parent, target->d_parent); 2612 list_move(&target->d_child, &target->d_parent->d_subdirs); 2613 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs); 2614 if (exchange) 2615 fsnotify_d_move(target); 2616 fsnotify_d_move(dentry); 2617 } 2618 2619 write_seqcount_end(&target->d_seq); 2620 write_seqcount_end(&dentry->d_seq); 2621 2622 dentry_unlock_for_move(dentry, target); 2623 } 2624 2625 /* 2626 * d_move - move a dentry 2627 * @dentry: entry to move 2628 * @target: new dentry 2629 * 2630 * Update the dcache to reflect the move of a file name. Negative 2631 * dcache entries should not be moved in this way. See the locking 2632 * requirements for __d_move. 2633 */ 2634 void d_move(struct dentry *dentry, struct dentry *target) 2635 { 2636 write_seqlock(&rename_lock); 2637 __d_move(dentry, target, false); 2638 write_sequnlock(&rename_lock); 2639 } 2640 EXPORT_SYMBOL(d_move); 2641 2642 /* 2643 * d_exchange - exchange two dentries 2644 * @dentry1: first dentry 2645 * @dentry2: second dentry 2646 */ 2647 void d_exchange(struct dentry *dentry1, struct dentry *dentry2) 2648 { 2649 write_seqlock(&rename_lock); 2650 2651 WARN_ON(!dentry1->d_inode); 2652 WARN_ON(!dentry2->d_inode); 2653 WARN_ON(IS_ROOT(dentry1)); 2654 WARN_ON(IS_ROOT(dentry2)); 2655 2656 __d_move(dentry1, dentry2, true); 2657 2658 write_sequnlock(&rename_lock); 2659 } 2660 2661 /** 2662 * d_ancestor - search for an ancestor 2663 * @p1: ancestor dentry 2664 * @p2: child dentry 2665 * 2666 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is 2667 * an ancestor of p2, else NULL. 2668 */ 2669 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) 2670 { 2671 struct dentry *p; 2672 2673 for (p = p2; !IS_ROOT(p); p = p->d_parent) { 2674 if (p->d_parent == p1) 2675 return p; 2676 } 2677 return NULL; 2678 } 2679 2680 /* 2681 * This helper attempts to cope with remotely renamed directories 2682 * 2683 * It assumes that the caller is already holding 2684 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock 2685 * 2686 * Note: If ever the locking in lock_rename() changes, then please 2687 * remember to update this too... 2688 */ 2689 static int __d_unalias(struct inode *inode, 2690 struct dentry *dentry, struct dentry *alias) 2691 { 2692 struct mutex *m1 = NULL, *m2 = NULL; 2693 int ret = -EBUSY; 2694 2695 /* If alias and dentry share a parent, then no extra locks required */ 2696 if (alias->d_parent == dentry->d_parent) 2697 goto out_unalias; 2698 2699 /* See lock_rename() */ 2700 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) 2701 goto out_err; 2702 m1 = &dentry->d_sb->s_vfs_rename_mutex; 2703 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex)) 2704 goto out_err; 2705 m2 = &alias->d_parent->d_inode->i_mutex; 2706 out_unalias: 2707 __d_move(alias, dentry, false); 2708 ret = 0; 2709 out_err: 2710 spin_unlock(&inode->i_lock); 2711 if (m2) 2712 mutex_unlock(m2); 2713 if (m1) 2714 mutex_unlock(m1); 2715 return ret; 2716 } 2717 2718 /** 2719 * d_splice_alias - splice a disconnected dentry into the tree if one exists 2720 * @inode: the inode which may have a disconnected dentry 2721 * @dentry: a negative dentry which we want to point to the inode. 2722 * 2723 * If inode is a directory and has an IS_ROOT alias, then d_move that in 2724 * place of the given dentry and return it, else simply d_add the inode 2725 * to the dentry and return NULL. 2726 * 2727 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and 2728 * we should error out: directories can't have multiple aliases. 2729 * 2730 * This is needed in the lookup routine of any filesystem that is exportable 2731 * (via knfsd) so that we can build dcache paths to directories effectively. 2732 * 2733 * If a dentry was found and moved, then it is returned. Otherwise NULL 2734 * is returned. This matches the expected return value of ->lookup. 2735 * 2736 * Cluster filesystems may call this function with a negative, hashed dentry. 2737 * In that case, we know that the inode will be a regular file, and also this 2738 * will only occur during atomic_open. So we need to check for the dentry 2739 * being already hashed only in the final case. 2740 */ 2741 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) 2742 { 2743 if (IS_ERR(inode)) 2744 return ERR_CAST(inode); 2745 2746 BUG_ON(!d_unhashed(dentry)); 2747 2748 if (!inode) { 2749 __d_instantiate(dentry, NULL); 2750 goto out; 2751 } 2752 spin_lock(&inode->i_lock); 2753 if (S_ISDIR(inode->i_mode)) { 2754 struct dentry *new = __d_find_any_alias(inode); 2755 if (unlikely(new)) { 2756 write_seqlock(&rename_lock); 2757 if (unlikely(d_ancestor(new, dentry))) { 2758 write_sequnlock(&rename_lock); 2759 spin_unlock(&inode->i_lock); 2760 dput(new); 2761 new = ERR_PTR(-ELOOP); 2762 pr_warn_ratelimited( 2763 "VFS: Lookup of '%s' in %s %s" 2764 " would have caused loop\n", 2765 dentry->d_name.name, 2766 inode->i_sb->s_type->name, 2767 inode->i_sb->s_id); 2768 } else if (!IS_ROOT(new)) { 2769 int err = __d_unalias(inode, dentry, new); 2770 write_sequnlock(&rename_lock); 2771 if (err) { 2772 dput(new); 2773 new = ERR_PTR(err); 2774 } 2775 } else { 2776 __d_move(new, dentry, false); 2777 write_sequnlock(&rename_lock); 2778 spin_unlock(&inode->i_lock); 2779 security_d_instantiate(new, inode); 2780 } 2781 iput(inode); 2782 return new; 2783 } 2784 } 2785 /* already taking inode->i_lock, so d_add() by hand */ 2786 __d_instantiate(dentry, inode); 2787 spin_unlock(&inode->i_lock); 2788 out: 2789 security_d_instantiate(dentry, inode); 2790 d_rehash(dentry); 2791 return NULL; 2792 } 2793 EXPORT_SYMBOL(d_splice_alias); 2794 2795 static int prepend(char **buffer, int *buflen, const char *str, int namelen) 2796 { 2797 *buflen -= namelen; 2798 if (*buflen < 0) 2799 return -ENAMETOOLONG; 2800 *buffer -= namelen; 2801 memcpy(*buffer, str, namelen); 2802 return 0; 2803 } 2804 2805 /** 2806 * prepend_name - prepend a pathname in front of current buffer pointer 2807 * @buffer: buffer pointer 2808 * @buflen: allocated length of the buffer 2809 * @name: name string and length qstr structure 2810 * 2811 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to 2812 * make sure that either the old or the new name pointer and length are 2813 * fetched. However, there may be mismatch between length and pointer. 2814 * The length cannot be trusted, we need to copy it byte-by-byte until 2815 * the length is reached or a null byte is found. It also prepends "/" at 2816 * the beginning of the name. The sequence number check at the caller will 2817 * retry it again when a d_move() does happen. So any garbage in the buffer 2818 * due to mismatched pointer and length will be discarded. 2819 * 2820 * Data dependency barrier is needed to make sure that we see that terminating 2821 * NUL. Alpha strikes again, film at 11... 2822 */ 2823 static int prepend_name(char **buffer, int *buflen, struct qstr *name) 2824 { 2825 const char *dname = ACCESS_ONCE(name->name); 2826 u32 dlen = ACCESS_ONCE(name->len); 2827 char *p; 2828 2829 smp_read_barrier_depends(); 2830 2831 *buflen -= dlen + 1; 2832 if (*buflen < 0) 2833 return -ENAMETOOLONG; 2834 p = *buffer -= dlen + 1; 2835 *p++ = '/'; 2836 while (dlen--) { 2837 char c = *dname++; 2838 if (!c) 2839 break; 2840 *p++ = c; 2841 } 2842 return 0; 2843 } 2844 2845 /** 2846 * prepend_path - Prepend path string to a buffer 2847 * @path: the dentry/vfsmount to report 2848 * @root: root vfsmnt/dentry 2849 * @buffer: pointer to the end of the buffer 2850 * @buflen: pointer to buffer length 2851 * 2852 * The function will first try to write out the pathname without taking any 2853 * lock other than the RCU read lock to make sure that dentries won't go away. 2854 * It only checks the sequence number of the global rename_lock as any change 2855 * in the dentry's d_seq will be preceded by changes in the rename_lock 2856 * sequence number. If the sequence number had been changed, it will restart 2857 * the whole pathname back-tracing sequence again by taking the rename_lock. 2858 * In this case, there is no need to take the RCU read lock as the recursive 2859 * parent pointer references will keep the dentry chain alive as long as no 2860 * rename operation is performed. 2861 */ 2862 static int prepend_path(const struct path *path, 2863 const struct path *root, 2864 char **buffer, int *buflen) 2865 { 2866 struct dentry *dentry; 2867 struct vfsmount *vfsmnt; 2868 struct mount *mnt; 2869 int error = 0; 2870 unsigned seq, m_seq = 0; 2871 char *bptr; 2872 int blen; 2873 2874 rcu_read_lock(); 2875 restart_mnt: 2876 read_seqbegin_or_lock(&mount_lock, &m_seq); 2877 seq = 0; 2878 rcu_read_lock(); 2879 restart: 2880 bptr = *buffer; 2881 blen = *buflen; 2882 error = 0; 2883 dentry = path->dentry; 2884 vfsmnt = path->mnt; 2885 mnt = real_mount(vfsmnt); 2886 read_seqbegin_or_lock(&rename_lock, &seq); 2887 while (dentry != root->dentry || vfsmnt != root->mnt) { 2888 struct dentry * parent; 2889 2890 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { 2891 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent); 2892 /* Global root? */ 2893 if (mnt != parent) { 2894 dentry = ACCESS_ONCE(mnt->mnt_mountpoint); 2895 mnt = parent; 2896 vfsmnt = &mnt->mnt; 2897 continue; 2898 } 2899 /* 2900 * Filesystems needing to implement special "root names" 2901 * should do so with ->d_dname() 2902 */ 2903 if (IS_ROOT(dentry) && 2904 (dentry->d_name.len != 1 || 2905 dentry->d_name.name[0] != '/')) { 2906 WARN(1, "Root dentry has weird name <%.*s>\n", 2907 (int) dentry->d_name.len, 2908 dentry->d_name.name); 2909 } 2910 if (!error) 2911 error = is_mounted(vfsmnt) ? 1 : 2; 2912 break; 2913 } 2914 parent = dentry->d_parent; 2915 prefetch(parent); 2916 error = prepend_name(&bptr, &blen, &dentry->d_name); 2917 if (error) 2918 break; 2919 2920 dentry = parent; 2921 } 2922 if (!(seq & 1)) 2923 rcu_read_unlock(); 2924 if (need_seqretry(&rename_lock, seq)) { 2925 seq = 1; 2926 goto restart; 2927 } 2928 done_seqretry(&rename_lock, seq); 2929 2930 if (!(m_seq & 1)) 2931 rcu_read_unlock(); 2932 if (need_seqretry(&mount_lock, m_seq)) { 2933 m_seq = 1; 2934 goto restart_mnt; 2935 } 2936 done_seqretry(&mount_lock, m_seq); 2937 2938 if (error >= 0 && bptr == *buffer) { 2939 if (--blen < 0) 2940 error = -ENAMETOOLONG; 2941 else 2942 *--bptr = '/'; 2943 } 2944 *buffer = bptr; 2945 *buflen = blen; 2946 return error; 2947 } 2948 2949 /** 2950 * __d_path - return the path of a dentry 2951 * @path: the dentry/vfsmount to report 2952 * @root: root vfsmnt/dentry 2953 * @buf: buffer to return value in 2954 * @buflen: buffer length 2955 * 2956 * Convert a dentry into an ASCII path name. 2957 * 2958 * Returns a pointer into the buffer or an error code if the 2959 * path was too long. 2960 * 2961 * "buflen" should be positive. 2962 * 2963 * If the path is not reachable from the supplied root, return %NULL. 2964 */ 2965 char *__d_path(const struct path *path, 2966 const struct path *root, 2967 char *buf, int buflen) 2968 { 2969 char *res = buf + buflen; 2970 int error; 2971 2972 prepend(&res, &buflen, "\0", 1); 2973 error = prepend_path(path, root, &res, &buflen); 2974 2975 if (error < 0) 2976 return ERR_PTR(error); 2977 if (error > 0) 2978 return NULL; 2979 return res; 2980 } 2981 2982 char *d_absolute_path(const struct path *path, 2983 char *buf, int buflen) 2984 { 2985 struct path root = {}; 2986 char *res = buf + buflen; 2987 int error; 2988 2989 prepend(&res, &buflen, "\0", 1); 2990 error = prepend_path(path, &root, &res, &buflen); 2991 2992 if (error > 1) 2993 error = -EINVAL; 2994 if (error < 0) 2995 return ERR_PTR(error); 2996 return res; 2997 } 2998 2999 /* 3000 * same as __d_path but appends "(deleted)" for unlinked files. 3001 */ 3002 static int path_with_deleted(const struct path *path, 3003 const struct path *root, 3004 char **buf, int *buflen) 3005 { 3006 prepend(buf, buflen, "\0", 1); 3007 if (d_unlinked(path->dentry)) { 3008 int error = prepend(buf, buflen, " (deleted)", 10); 3009 if (error) 3010 return error; 3011 } 3012 3013 return prepend_path(path, root, buf, buflen); 3014 } 3015 3016 static int prepend_unreachable(char **buffer, int *buflen) 3017 { 3018 return prepend(buffer, buflen, "(unreachable)", 13); 3019 } 3020 3021 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) 3022 { 3023 unsigned seq; 3024 3025 do { 3026 seq = read_seqcount_begin(&fs->seq); 3027 *root = fs->root; 3028 } while (read_seqcount_retry(&fs->seq, seq)); 3029 } 3030 3031 /** 3032 * d_path - return the path of a dentry 3033 * @path: path to report 3034 * @buf: buffer to return value in 3035 * @buflen: buffer length 3036 * 3037 * Convert a dentry into an ASCII path name. If the entry has been deleted 3038 * the string " (deleted)" is appended. Note that this is ambiguous. 3039 * 3040 * Returns a pointer into the buffer or an error code if the path was 3041 * too long. Note: Callers should use the returned pointer, not the passed 3042 * in buffer, to use the name! The implementation often starts at an offset 3043 * into the buffer, and may leave 0 bytes at the start. 3044 * 3045 * "buflen" should be positive. 3046 */ 3047 char *d_path(const struct path *path, char *buf, int buflen) 3048 { 3049 char *res = buf + buflen; 3050 struct path root; 3051 int error; 3052 3053 /* 3054 * We have various synthetic filesystems that never get mounted. On 3055 * these filesystems dentries are never used for lookup purposes, and 3056 * thus don't need to be hashed. They also don't need a name until a 3057 * user wants to identify the object in /proc/pid/fd/. The little hack 3058 * below allows us to generate a name for these objects on demand: 3059 * 3060 * Some pseudo inodes are mountable. When they are mounted 3061 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname 3062 * and instead have d_path return the mounted path. 3063 */ 3064 if (path->dentry->d_op && path->dentry->d_op->d_dname && 3065 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) 3066 return path->dentry->d_op->d_dname(path->dentry, buf, buflen); 3067 3068 rcu_read_lock(); 3069 get_fs_root_rcu(current->fs, &root); 3070 error = path_with_deleted(path, &root, &res, &buflen); 3071 rcu_read_unlock(); 3072 3073 if (error < 0) 3074 res = ERR_PTR(error); 3075 return res; 3076 } 3077 EXPORT_SYMBOL(d_path); 3078 3079 /* 3080 * Helper function for dentry_operations.d_dname() members 3081 */ 3082 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, 3083 const char *fmt, ...) 3084 { 3085 va_list args; 3086 char temp[64]; 3087 int sz; 3088 3089 va_start(args, fmt); 3090 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; 3091 va_end(args); 3092 3093 if (sz > sizeof(temp) || sz > buflen) 3094 return ERR_PTR(-ENAMETOOLONG); 3095 3096 buffer += buflen - sz; 3097 return memcpy(buffer, temp, sz); 3098 } 3099 3100 char *simple_dname(struct dentry *dentry, char *buffer, int buflen) 3101 { 3102 char *end = buffer + buflen; 3103 /* these dentries are never renamed, so d_lock is not needed */ 3104 if (prepend(&end, &buflen, " (deleted)", 11) || 3105 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) || 3106 prepend(&end, &buflen, "/", 1)) 3107 end = ERR_PTR(-ENAMETOOLONG); 3108 return end; 3109 } 3110 EXPORT_SYMBOL(simple_dname); 3111 3112 /* 3113 * Write full pathname from the root of the filesystem into the buffer. 3114 */ 3115 static char *__dentry_path(struct dentry *d, char *buf, int buflen) 3116 { 3117 struct dentry *dentry; 3118 char *end, *retval; 3119 int len, seq = 0; 3120 int error = 0; 3121 3122 if (buflen < 2) 3123 goto Elong; 3124 3125 rcu_read_lock(); 3126 restart: 3127 dentry = d; 3128 end = buf + buflen; 3129 len = buflen; 3130 prepend(&end, &len, "\0", 1); 3131 /* Get '/' right */ 3132 retval = end-1; 3133 *retval = '/'; 3134 read_seqbegin_or_lock(&rename_lock, &seq); 3135 while (!IS_ROOT(dentry)) { 3136 struct dentry *parent = dentry->d_parent; 3137 3138 prefetch(parent); 3139 error = prepend_name(&end, &len, &dentry->d_name); 3140 if (error) 3141 break; 3142 3143 retval = end; 3144 dentry = parent; 3145 } 3146 if (!(seq & 1)) 3147 rcu_read_unlock(); 3148 if (need_seqretry(&rename_lock, seq)) { 3149 seq = 1; 3150 goto restart; 3151 } 3152 done_seqretry(&rename_lock, seq); 3153 if (error) 3154 goto Elong; 3155 return retval; 3156 Elong: 3157 return ERR_PTR(-ENAMETOOLONG); 3158 } 3159 3160 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) 3161 { 3162 return __dentry_path(dentry, buf, buflen); 3163 } 3164 EXPORT_SYMBOL(dentry_path_raw); 3165 3166 char *dentry_path(struct dentry *dentry, char *buf, int buflen) 3167 { 3168 char *p = NULL; 3169 char *retval; 3170 3171 if (d_unlinked(dentry)) { 3172 p = buf + buflen; 3173 if (prepend(&p, &buflen, "//deleted", 10) != 0) 3174 goto Elong; 3175 buflen++; 3176 } 3177 retval = __dentry_path(dentry, buf, buflen); 3178 if (!IS_ERR(retval) && p) 3179 *p = '/'; /* restore '/' overriden with '\0' */ 3180 return retval; 3181 Elong: 3182 return ERR_PTR(-ENAMETOOLONG); 3183 } 3184 3185 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root, 3186 struct path *pwd) 3187 { 3188 unsigned seq; 3189 3190 do { 3191 seq = read_seqcount_begin(&fs->seq); 3192 *root = fs->root; 3193 *pwd = fs->pwd; 3194 } while (read_seqcount_retry(&fs->seq, seq)); 3195 } 3196 3197 /* 3198 * NOTE! The user-level library version returns a 3199 * character pointer. The kernel system call just 3200 * returns the length of the buffer filled (which 3201 * includes the ending '\0' character), or a negative 3202 * error value. So libc would do something like 3203 * 3204 * char *getcwd(char * buf, size_t size) 3205 * { 3206 * int retval; 3207 * 3208 * retval = sys_getcwd(buf, size); 3209 * if (retval >= 0) 3210 * return buf; 3211 * errno = -retval; 3212 * return NULL; 3213 * } 3214 */ 3215 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) 3216 { 3217 int error; 3218 struct path pwd, root; 3219 char *page = __getname(); 3220 3221 if (!page) 3222 return -ENOMEM; 3223 3224 rcu_read_lock(); 3225 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); 3226 3227 error = -ENOENT; 3228 if (!d_unlinked(pwd.dentry)) { 3229 unsigned long len; 3230 char *cwd = page + PATH_MAX; 3231 int buflen = PATH_MAX; 3232 3233 prepend(&cwd, &buflen, "\0", 1); 3234 error = prepend_path(&pwd, &root, &cwd, &buflen); 3235 rcu_read_unlock(); 3236 3237 if (error < 0) 3238 goto out; 3239 3240 /* Unreachable from current root */ 3241 if (error > 0) { 3242 error = prepend_unreachable(&cwd, &buflen); 3243 if (error) 3244 goto out; 3245 } 3246 3247 error = -ERANGE; 3248 len = PATH_MAX + page - cwd; 3249 if (len <= size) { 3250 error = len; 3251 if (copy_to_user(buf, cwd, len)) 3252 error = -EFAULT; 3253 } 3254 } else { 3255 rcu_read_unlock(); 3256 } 3257 3258 out: 3259 __putname(page); 3260 return error; 3261 } 3262 3263 /* 3264 * Test whether new_dentry is a subdirectory of old_dentry. 3265 * 3266 * Trivially implemented using the dcache structure 3267 */ 3268 3269 /** 3270 * is_subdir - is new dentry a subdirectory of old_dentry 3271 * @new_dentry: new dentry 3272 * @old_dentry: old dentry 3273 * 3274 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth). 3275 * Returns 0 otherwise. 3276 * Caller must ensure that "new_dentry" is pinned before calling is_subdir() 3277 */ 3278 3279 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) 3280 { 3281 int result; 3282 unsigned seq; 3283 3284 if (new_dentry == old_dentry) 3285 return 1; 3286 3287 do { 3288 /* for restarting inner loop in case of seq retry */ 3289 seq = read_seqbegin(&rename_lock); 3290 /* 3291 * Need rcu_readlock to protect against the d_parent trashing 3292 * due to d_move 3293 */ 3294 rcu_read_lock(); 3295 if (d_ancestor(old_dentry, new_dentry)) 3296 result = 1; 3297 else 3298 result = 0; 3299 rcu_read_unlock(); 3300 } while (read_seqretry(&rename_lock, seq)); 3301 3302 return result; 3303 } 3304 3305 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) 3306 { 3307 struct dentry *root = data; 3308 if (dentry != root) { 3309 if (d_unhashed(dentry) || !dentry->d_inode) 3310 return D_WALK_SKIP; 3311 3312 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 3313 dentry->d_flags |= DCACHE_GENOCIDE; 3314 dentry->d_lockref.count--; 3315 } 3316 } 3317 return D_WALK_CONTINUE; 3318 } 3319 3320 void d_genocide(struct dentry *parent) 3321 { 3322 d_walk(parent, parent, d_genocide_kill, NULL); 3323 } 3324 3325 void d_tmpfile(struct dentry *dentry, struct inode *inode) 3326 { 3327 inode_dec_link_count(inode); 3328 BUG_ON(dentry->d_name.name != dentry->d_iname || 3329 !hlist_unhashed(&dentry->d_u.d_alias) || 3330 !d_unlinked(dentry)); 3331 spin_lock(&dentry->d_parent->d_lock); 3332 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 3333 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", 3334 (unsigned long long)inode->i_ino); 3335 spin_unlock(&dentry->d_lock); 3336 spin_unlock(&dentry->d_parent->d_lock); 3337 d_instantiate(dentry, inode); 3338 } 3339 EXPORT_SYMBOL(d_tmpfile); 3340 3341 static __initdata unsigned long dhash_entries; 3342 static int __init set_dhash_entries(char *str) 3343 { 3344 if (!str) 3345 return 0; 3346 dhash_entries = simple_strtoul(str, &str, 0); 3347 return 1; 3348 } 3349 __setup("dhash_entries=", set_dhash_entries); 3350 3351 static void __init dcache_init_early(void) 3352 { 3353 unsigned int loop; 3354 3355 /* If hashes are distributed across NUMA nodes, defer 3356 * hash allocation until vmalloc space is available. 3357 */ 3358 if (hashdist) 3359 return; 3360 3361 dentry_hashtable = 3362 alloc_large_system_hash("Dentry cache", 3363 sizeof(struct hlist_bl_head), 3364 dhash_entries, 3365 13, 3366 HASH_EARLY, 3367 &d_hash_shift, 3368 &d_hash_mask, 3369 0, 3370 0); 3371 3372 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3373 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3374 } 3375 3376 static void __init dcache_init(void) 3377 { 3378 unsigned int loop; 3379 3380 /* 3381 * A constructor could be added for stable state like the lists, 3382 * but it is probably not worth it because of the cache nature 3383 * of the dcache. 3384 */ 3385 dentry_cache = KMEM_CACHE(dentry, 3386 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 3387 3388 /* Hash may have been set up in dcache_init_early */ 3389 if (!hashdist) 3390 return; 3391 3392 dentry_hashtable = 3393 alloc_large_system_hash("Dentry cache", 3394 sizeof(struct hlist_bl_head), 3395 dhash_entries, 3396 13, 3397 0, 3398 &d_hash_shift, 3399 &d_hash_mask, 3400 0, 3401 0); 3402 3403 for (loop = 0; loop < (1U << d_hash_shift); loop++) 3404 INIT_HLIST_BL_HEAD(dentry_hashtable + loop); 3405 } 3406 3407 /* SLAB cache for __getname() consumers */ 3408 struct kmem_cache *names_cachep __read_mostly; 3409 EXPORT_SYMBOL(names_cachep); 3410 3411 EXPORT_SYMBOL(d_genocide); 3412 3413 void __init vfs_caches_init_early(void) 3414 { 3415 dcache_init_early(); 3416 inode_init_early(); 3417 } 3418 3419 void __init vfs_caches_init(unsigned long mempages) 3420 { 3421 unsigned long reserve; 3422 3423 /* Base hash sizes on available memory, with a reserve equal to 3424 150% of current kernel size */ 3425 3426 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); 3427 mempages -= reserve; 3428 3429 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3430 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3431 3432 dcache_init(); 3433 inode_init(); 3434 files_init(mempages); 3435 mnt_init(); 3436 bdev_cache_init(); 3437 chrdev_init(); 3438 } 3439