// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/dir.c - kernfs directory implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/hash.h>

#include "kernfs-internal.h"

static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
/*
 * Don't use rename_lock to piggyback on pr_cont_buf. We don't want to
 * call pr_cont() while holding rename_lock. Because sometimes pr_cont()
 * will perform wakeups when releasing console_sem. Holding rename_lock
 * will introduce deadlock if the scheduler reads the kernfs_name in the
 * wakeup path.
 */
static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by pr_cont_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock);	/* root->ino_idr */

#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)

static bool kernfs_active(struct kernfs_node *kn)
{
	lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
	return atomic_read(&kn->active) >= 0;
}

static bool kernfs_lockdep(struct kernfs_node *kn)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	return kn->flags & KERNFS_LOCKDEP;
#else
	return false;
#endif
}

static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
{
	if (!kn)
		return strlcpy(buf, "(null)", buflen);

	return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
}

/* kernfs_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
	size_t depth = 0;

	while (to->parent && to != from) {
		depth++;
		to = to->parent;
	}
	return depth;
}

static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
						  struct kernfs_node *b)
{
	size_t da, db;
	struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);

	if (ra != rb)
		return NULL;

	da = kernfs_depth(ra->kn, a);
	db = kernfs_depth(rb->kn, b);

	while (da > db) {
		a = a->parent;
		da--;
	}
	while (db > da) {
		b = b->parent;
		db--;
	}

	/* worst case b and a will be the same at root */
	while (b != a) {
		b = b->parent;
		a = a->parent;
	}

	return a;
}

/**
 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
 * where kn_from is treated as root of the path.
 * @kn_from: kernfs node which should be treated as root for the path
 * @kn_to: kernfs node to which path is needed
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * We need to handle a couple of scenarios here:
 * [1] when @kn_from is an ancestor of @kn_to at some level
 * kn_from: /n1/n2/n3
 * kn_to:   /n1/n2/n3/n4/n5
 * result:  /n4/n5
 *
 * [2] when @kn_from is on a different hierarchy and we need to find common
 * ancestor between @kn_from and @kn_to.
 * kn_from: /n1/n2/n3/n4
 * kn_to:   /n1/n2/n5
 * result:  /../../n5
 * OR
 * kn_from: /n1/n2/n3/n4/n5   [depth=5]
 * kn_to:   /n1/n2/n3         [depth=3]
 * result:  /../..
 *
 * [3] when @kn_to is NULL result will be "(null)"
 *
 * Returns the length of the full path.
 * If the full length is equal to or greater than @buflen, @buf contains
 * the truncated path with the trailing '\0'. On error, -errno is returned.
 */
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0;
	int i, j;

	if (!kn_to)
		return strlcpy(buf, "(null)", buflen);

	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	if (!buf)
		return -EINVAL;

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -EINVAL;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	buf[0] = '\0';

	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/* Calculate how many bytes we need for the rest */
	for (i = depth_to - 1; i >= 0; i--) {
		for (kn = kn_to, j = 0; j < i; j++)
			kn = kn->parent;
		len += strlcpy(buf + len, "/",
			       len < buflen ? buflen - len : 0);
		len += strlcpy(buf + len, kn->name,
			       len < buflen ? buflen - len : 0);
	}

	return len;
}

/**
 * kernfs_name - obtain the name of a given node
 * @kn: kernfs_node of interest
 * @buf: buffer to copy @kn's name into
 * @buflen: size of @buf
 *
 * Copies the name of @kn into @buf of @buflen bytes. The behavior is
 * similar to strlcpy(). It returns the length of @kn's name and if @buf
 * isn't long enough, it's filled up to @buflen-1 and nul terminated.
 *
 * Fills buffer with "(null)" if @kn is NULL.
 *
 * This function can be called from any context.
 */
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_name_locked(kn, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}

/**
 * kernfs_path_from_node - build path of node @to relative to @from.
 * @from: parent kernfs_node relative to which we need to build the path
 * @to: kernfs_node of interest
 * @buf: buffer to copy @to's path into
 * @buflen: size of @buf
 *
 * Builds @to's path relative to @from in @buf. @from and @to must
 * be on the same kernfs-root. If @from is not a parent of @to, then a
 * relative path (which includes '..'s) as needed to reach from @from
 * to @to is returned.
 *
 * Returns the length of the full path. If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'. On error, -errno is returned.
 */
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
			  char *buf, size_t buflen)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	ret = kernfs_path_from_node_locked(to, from, buf, buflen);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);

/**
 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
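 *
 * The name is rendered into the shared PATH_MAX-sized kernfs_pr_cont_buf
 * under kernfs_pr_cont_lock before being handed to pr_cont(), so output
 * from concurrent callers is not interleaved within a single name.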
 */
void pr_cont_kernfs_name(struct kernfs_node *kn)
{
	unsigned long flags;

	spin_lock_irqsave(&kernfs_pr_cont_lock, flags);

	kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
	pr_cont("%s", kernfs_pr_cont_buf);

	spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}

/**
 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
 * @kn: kernfs_node of interest
 *
 * This function can be called from any context.
 */
void pr_cont_kernfs_path(struct kernfs_node *kn)
{
	unsigned long flags;
	int sz;

	spin_lock_irqsave(&kernfs_pr_cont_lock, flags);

	sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf,
				   sizeof(kernfs_pr_cont_buf));
	if (sz < 0) {
		pr_cont("(error)");
		goto out;
	}

	if (sz >= sizeof(kernfs_pr_cont_buf)) {
		pr_cont("(name too long)");
		goto out;
	}

	pr_cont("%s", kernfs_pr_cont_buf);

out:
	spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags);
}

/**
 * kernfs_get_parent - determine the parent node and pin it
 * @kn: kernfs_node of interest
 *
 * Determines @kn's parent, pins and returns it. This function can be
 * called from any context.
 */
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_rename_lock, flags);
	parent = kn->parent;
	kernfs_get(parent);
	spin_unlock_irqrestore(&kernfs_rename_lock, flags);

	return parent;
}

/**
 * kernfs_name_hash - calculate hash of a kernfs name
 * @name: Null terminated string to hash
 * @ns: Namespace tag to hash
 *
 * Returns 31-bit hash of ns + name (so it fits in an off_t)
 */
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
	unsigned long hash = init_name_hash(ns);
	unsigned int len = strlen(name);

	while (len--)
		hash = partial_name_hash(*name++, hash);
	hash = end_name_hash(hash);
	hash &= 0x7fffffffU;
	/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
	if (hash < 2)
		hash += 2;
	if (hash >= INT_MAX)
		hash = INT_MAX - 1;
	return hash;
}

static int kernfs_name_compare(unsigned int hash, const char *name,
			       const void *ns, const struct kernfs_node *kn)
{
	if (hash < kn->hash)
		return -1;
	if (hash > kn->hash)
		return 1;
	if (ns < kn->ns)
		return -1;
	if (ns > kn->ns)
		return 1;
	return strcmp(name, kn->name);
}

static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}

/**
 * kernfs_link_sibling - link kernfs_node into sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Link @kn into its sibling rbtree which starts from
 * @kn->parent->dir.children.
 *
 * Locking:
 * kernfs_rwsem held exclusive
 *
 * RETURNS:
 * 0 on success, -EEXIST on failure.
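 *
 * Siblings are kept ordered by kernfs_sd_compare(), i.e. by hash, then
 * namespace tag, then name, which is the same ordering kernfs_find_ns()
 * relies on when searching the rbtree.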
 */
static int kernfs_link_sibling(struct kernfs_node *kn)
{
	struct rb_node **node = &kn->parent->dir.children.rb_node;
	struct rb_node *parent = NULL;

	while (*node) {
		struct kernfs_node *pos;
		int result;

		pos = rb_to_kn(*node);
		parent = *node;
		result = kernfs_sd_compare(kn, pos);
		if (result < 0)
			node = &pos->rb.rb_left;
		else if (result > 0)
			node = &pos->rb.rb_right;
		else
			return -EEXIST;
	}

	/* add new node and rebalance the tree */
	rb_link_node(&kn->rb, parent, node);
	rb_insert_color(&kn->rb, &kn->parent->dir.children);

	/* successfully added, account subdir number */
	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs++;
	kernfs_inc_rev(kn->parent);

	return 0;
}

/**
 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
 * @kn: kernfs_node of interest
 *
 * Try to unlink @kn from its sibling rbtree which starts from
 * kn->parent->dir.children. Returns %true if @kn was actually
 * removed, %false if @kn wasn't on the rbtree.
 *
 * Locking:
 * kernfs_rwsem held exclusive
 */
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs--;
	kernfs_inc_rev(kn->parent);

	rb_erase(&kn->rb, &kn->parent->dir.children);
	RB_CLEAR_NODE(&kn->rb);
	return true;
}

/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn. This function is a noop if @kn
 * is NULL.
 *
 * RETURNS:
 * Pointer to @kn on success, NULL on failure.
 */
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	if (kernfs_lockdep(kn))
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}

/**
 * kernfs_put_active - put an active reference to kernfs_node
 * @kn: kernfs_node to put an active reference to
 *
 * Put an active reference to @kn. This function is a noop if @kn
 * is NULL.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	int v;

	if (unlikely(!kn))
		return;

	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}

/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
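 *
 * Note that kernfs_rwsem is dropped while waiting for the active count to
 * reach KN_DEACTIVATED_BIAS and re-acquired before returning, as reflected
 * in the sparse lock annotations on the function.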
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_root(kn)->kernfs_rwsem)
	__acquires(&kernfs_root(kn)->kernfs_rwsem)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held_write(&root->kernfs_rwsem);
	WARN_ON_ONCE(kernfs_active(kn));

	up_write(&root->kernfs_rwsem);

	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* but everyone should wait for draining */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, _RET_IP_);
	}

	kernfs_drain_open_files(kn);

	down_write(&root->kernfs_rwsem);
}

/**
 * kernfs_get - get a reference count on a kernfs_node
 * @kn: the target kernfs_node
 */
void kernfs_get(struct kernfs_node *kn)
{
	if (kn) {
		WARN_ON(!atomic_read(&kn->count));
		atomic_inc(&kn->count);
	}
}
EXPORT_SYMBOL_GPL(kernfs_get);

/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		simple_xattrs_free(&kn->iattr->xattrs);
		kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
	}
	spin_lock(&kernfs_idr_lock);
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
	spin_unlock(&kernfs_idr_lock);
	kmem_cache_free(kernfs_node_cache, kn);

	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		idr_destroy(&root->ino_idr);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);

/**
 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
 * @dentry: the dentry in question
 *
 * Return the kernfs_node associated with @dentry. If @dentry is not a
 * kernfs one, %NULL is returned.
 *
 * While the returned kernfs_node will stay accessible as long as @dentry
 * is accessible, the returned node can be in any state and the caller is
 * fully responsible for determining what's accessible.
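 *
 * The check is based solely on @dentry's superblock using kernfs_sops, so
 * dentries from any kernfs-backed filesystem (sysfs, for example) match.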
 */
struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
{
	if (dentry->d_sb->s_op == &kernfs_sops)
		return kernfs_dentry_node(dentry);
	return NULL;
}

static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
					     struct kernfs_node *parent,
					     const char *name, umode_t mode,
					     kuid_t uid, kgid_t gid,
					     unsigned flags)
{
	struct kernfs_node *kn;
	u32 id_highbits;
	int ret;

	name = kstrdup_const(name, GFP_KERNEL);
	if (!name)
		return NULL;

	kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
	if (!kn)
		goto err_out1;

	idr_preload(GFP_KERNEL);
	spin_lock(&kernfs_idr_lock);
	ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
	if (ret >= 0 && ret < root->last_id_lowbits)
		root->id_highbits++;
	id_highbits = root->id_highbits;
	root->last_id_lowbits = ret;
	spin_unlock(&kernfs_idr_lock);
	idr_preload_end();
	if (ret < 0)
		goto err_out2;

	kn->id = (u64)id_highbits << 32 | ret;

	atomic_set(&kn->count, 1);
	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
	RB_CLEAR_NODE(&kn->rb);

	kn->name = name;
	kn->mode = mode;
	kn->flags = flags;

	if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) {
		struct iattr iattr = {
			.ia_valid = ATTR_UID | ATTR_GID,
			.ia_uid = uid,
			.ia_gid = gid,
		};

		ret = __kernfs_setattr(kn, &iattr);
		if (ret < 0)
			goto err_out3;
	}

	if (parent) {
		ret = security_kernfs_init_security(parent, kn);
		if (ret)
			goto err_out3;
	}

	return kn;

 err_out3:
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
 err_out2:
	kmem_cache_free(kernfs_node_cache, kn);
 err_out1:
	kfree_const(name);
	return NULL;
}

struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
				    const char *name, umode_t mode,
				    kuid_t uid, kgid_t gid,
				    unsigned flags)
{
	struct kernfs_node *kn;

	kn = __kernfs_new_node(kernfs_root(parent), parent,
			       name, mode, uid, gid, flags);
	if (kn) {
		kernfs_get(parent);
		kn->parent = parent;
	}
	return kn;
}

/*
 * kernfs_find_and_get_node_by_id - get kernfs_node from node id
 * @root: the kernfs root
 * @id: the target node id
 *
 * @id's lower 32bits encode ino and upper gen. If the gen portion is
 * zero, all generations are matched.
 *
 * RETURNS:
 * NULL on failure. Return a kernfs node with reference counter incremented
 */
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
						   u64 id)
{
	struct kernfs_node *kn;
	ino_t ino = kernfs_id_ino(id);
	u32 gen = kernfs_id_gen(id);

	spin_lock(&kernfs_idr_lock);

	kn = idr_find(&root->ino_idr, (u32)ino);
	if (!kn)
		goto err_unlock;

	if (sizeof(ino_t) >= sizeof(u64)) {
		/* we looked up with the low 32bits, compare the whole */
		if (kernfs_ino(kn) != ino)
			goto err_unlock;
	} else {
		/* 0 matches all generations */
		if (unlikely(gen && kernfs_gen(kn) != gen))
			goto err_unlock;
	}

	/*
	 * ACTIVATED is protected with kernfs_rwsem but it was clear when
	 * @kn was added to idr and we just want to see it set. No need to
	 * grab kernfs_rwsem.
	 */
	if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
		     !atomic_inc_not_zero(&kn->count)))
		goto err_unlock;

	spin_unlock(&kernfs_idr_lock);
	return kn;
err_unlock:
	spin_unlock(&kernfs_idr_lock);
	return NULL;
}

/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent. This
 * function increments nlink of the parent's inode if @kn is a
 * directory and links @kn into the children list of the parent.
 *
 * RETURNS:
 * 0 on success, -EEXIST if entry with the given name already
 * exists.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_root *root = kernfs_root(parent);
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	down_write(&root->kernfs_rwsem);

	ret = -EINVAL;
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if (parent->flags & KERNFS_EMPTY_DIR)
		goto out_unlock;

	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update timestamps on the parent */
	ps_iattr = parent->iattr;
	if (ps_iattr) {
		ktime_get_real_ts64(&ps_iattr->ia_ctime);
		ps_iattr->ia_mtime = ps_iattr->ia_ctime;
	}

	up_write(&root->kernfs_rwsem);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate(). A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	up_write(&root->kernfs_rwsem);
	return ret;
}

/**
 * kernfs_find_ns - find kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with name @name under @parent. Returns pointer to
 * the found kernfs_node on success, %NULL on failure.
 */
static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
					  const unsigned char *name,
					  const void *ns)
{
	struct rb_node *node = parent->dir.children.rb_node;
	bool has_ns = kernfs_ns_enabled(parent);
	unsigned int hash;

	lockdep_assert_held(&kernfs_root(parent)->kernfs_rwsem);

	if (has_ns != (bool)ns) {
		WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		     has_ns ? "required" : "invalid", parent->name, name);
		return NULL;
	}

	hash = kernfs_name_hash(name, ns);
	while (node) {
		struct kernfs_node *kn;
		int result;

		kn = rb_to_kn(node);
		result = kernfs_name_compare(hash, name, ns, kn);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return kn;
	}
	return NULL;
}

static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
	size_t len;
	char *p, *name;

	lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);

	spin_lock_irq(&kernfs_pr_cont_lock);

	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	if (len >= sizeof(kernfs_pr_cont_buf)) {
		spin_unlock_irq(&kernfs_pr_cont_lock);
		return NULL;
	}

	p = kernfs_pr_cont_buf;

	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

	spin_unlock_irq(&kernfs_pr_cont_lock);

	return parent;
}

/**
 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
 * @parent: kernfs_node to search under
 * @name: name to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with name @name under @parent and get a reference
 * if found. This function may sleep and returns pointer to the found
 * kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns)
{
	struct kernfs_node *kn;
	struct kernfs_root *root = kernfs_root(parent);

	down_read(&root->kernfs_rwsem);
	kn = kernfs_find_ns(parent, name, ns);
	kernfs_get(kn);
	up_read(&root->kernfs_rwsem);

	return kn;
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);

/**
 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
 * @parent: kernfs_node to search under
 * @path: path to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with path @path under @parent and get a reference
 * if found. This function may sleep and returns pointer to the found
 * kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
					   const char *path, const void *ns)
{
	struct kernfs_node *kn;
	struct kernfs_root *root = kernfs_root(parent);

	down_read(&root->kernfs_rwsem);
	kn = kernfs_walk_ns(parent, path, ns);
	kernfs_get(kn);
	up_read(&root->kernfs_rwsem);

	return kn;
}

/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	idr_init(&root->ino_idr);
	init_rwsem(&root->kernfs_rwsem);
	INIT_LIST_HEAD(&root->supers);

	/*
	 * On 64bit ino setups, id is ino. On 32bit, low 32bits are ino and
	 * the high bits are the generation.
	 * The starting value for both ino and generation is 1. Initialize
	 * upper 32bit allocation accordingly.
	 */
	if (sizeof(ino_t) >= sizeof(u64))
		root->id_highbits = 0;
	else
		root->id_highbits = 1;

	kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			       KERNFS_DIR);
	if (!kn) {
		idr_destroy(&root->ino_idr);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}

/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	/*
	 * kernfs_remove holds kernfs_rwsem from the root so the root
	 * shouldn't be freed during the operation.
	 */
	kernfs_get(root->kn);
	kernfs_remove(root->kn);
	kernfs_put(root->kn); /* will also free @root */
}

/**
 * kernfs_root_to_node - return the kernfs_node associated with a kernfs_root
 * @root: root to use to lookup
 */
struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root)
{
	return root->kn;
}

/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 * @mode: mode of the new directory
 * @uid: uid of the new directory
 * @gid: gid of the new directory
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
 */
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid,
					 void *priv, const void *ns)
{
	struct kernfs_node *kn;
	int rc;

	/* allocate */
	kn = kernfs_new_node(parent, name, mode | S_IFDIR,
			     uid, gid, KERNFS_DIR);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->dir.root = parent->dir.root;
	kn->ns = ns;
	kn->priv = priv;

	/* link in */
	rc = kernfs_add_one(kn);
	if (!rc)
		return kn;

	kernfs_put(kn);
	return ERR_PTR(rc);
}

/**
 * kernfs_create_empty_dir - create an always empty directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
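 *
 * The returned directory carries KERNFS_EMPTY_DIR, so later attempts to add
 * children under it via kernfs_add_one() fail with -ENOENT.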
 */
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
					    const char *name)
{
	struct kernfs_node *kn;
	int rc;

	/* allocate */
	kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR,
			     GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->flags |= KERNFS_EMPTY_DIR;
	kn->dir.root = parent->dir.root;
	kn->ns = NULL;
	kn->priv = NULL;

	/* link in */
	rc = kernfs_add_one(kn);
	if (!rc)
		return kn;

	kernfs_put(kn);
	return ERR_PTR(rc);
}

static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;
	struct kernfs_root *root;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Negative hashed dentry? */
	if (d_really_is_negative(dentry)) {
		struct kernfs_node *parent;

		/* If the kernfs parent node has changed, discard and
		 * proceed to ->lookup.
		 */
		spin_lock(&dentry->d_lock);
		parent = kernfs_dentry_node(dentry->d_parent);
		if (parent) {
			spin_unlock(&dentry->d_lock);
			root = kernfs_root(parent);
			down_read(&root->kernfs_rwsem);
			if (kernfs_dir_changed(parent, dentry)) {
				up_read(&root->kernfs_rwsem);
				return 0;
			}
			up_read(&root->kernfs_rwsem);
		} else
			spin_unlock(&dentry->d_lock);

		/* The kernfs parent node hasn't changed, leave the
		 * dentry negative and return success.
		 */
		return 1;
	}

	kn = kernfs_dentry_node(dentry);
	root = kernfs_root(kn);
	down_read(&root->kernfs_rwsem);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	up_read(&root->kernfs_rwsem);
	return 1;
out_bad:
	up_read(&root->kernfs_rwsem);
	return 0;
}

const struct dentry_operations kernfs_dops = {
	.d_revalidate	= kernfs_dop_revalidate,
};

static struct dentry *kernfs_iop_lookup(struct inode *dir,
					struct dentry *dentry,
					unsigned int flags)
{
	struct kernfs_node *parent = dir->i_private;
	struct kernfs_node *kn;
	struct kernfs_root *root;
	struct inode *inode = NULL;
	const void *ns = NULL;

	root = kernfs_root(parent);
	down_read(&root->kernfs_rwsem);
	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dir->i_sb)->ns;

	kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
	/* attach dentry and inode */
	if (kn) {
		/* Inactive nodes are invisible to the VFS so don't
		 * create a negative.
		 */
		if (!kernfs_active(kn)) {
			up_read(&root->kernfs_rwsem);
			return NULL;
		}
		inode = kernfs_get_inode(dir->i_sb, kn);
		if (!inode)
			inode = ERR_PTR(-ENOMEM);
	}
	/*
	 * Needed for negative dentry validation.
	 * The negative dentry can be created in kernfs_iop_lookup()
	 * or transforms from positive dentry in dentry_unlink_inode()
	 * called from vfs_rmdir.
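	 *
	 * Recording the parent's revision here lets kernfs_dop_revalidate()
	 * notice, via kernfs_dir_changed(), when the directory contents have
	 * changed since the negative dentry was instantiated.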
	 */
	if (!IS_ERR(inode))
		kernfs_set_rev(parent, dentry);
	up_read(&root->kernfs_rwsem);

	/* instantiate and hash (possibly negative) dentry */
	return d_splice_alias(inode, dentry);
}

static int kernfs_iop_mkdir(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode)
{
	struct kernfs_node *parent = dir->i_private;
	struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
	int ret;

	if (!scops || !scops->mkdir)
		return -EPERM;

	if (!kernfs_get_active(parent))
		return -ENODEV;

	ret = scops->mkdir(parent, dentry->d_name.name, mode);

	kernfs_put_active(parent);
	return ret;
}

static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_dentry_node(dentry);
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
	int ret;

	if (!scops || !scops->rmdir)
		return -EPERM;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ret = scops->rmdir(kn);

	kernfs_put_active(kn);
	return ret;
}

static int kernfs_iop_rename(struct user_namespace *mnt_userns,
			     struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry,
			     unsigned int flags)
{
	struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
	struct kernfs_node *new_parent = new_dir->i_private;
	struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
	int ret;

	if (flags)
		return -EINVAL;

	if (!scops || !scops->rename)
		return -EPERM;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	if (!kernfs_get_active(new_parent)) {
		kernfs_put_active(kn);
		return -ENODEV;
	}

	ret = scops->rename(kn, new_parent, new_dentry->d_name.name);

	kernfs_put_active(new_parent);
	kernfs_put_active(kn);
	return ret;
}

const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,

	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};

static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
{
	struct kernfs_node *last;

	while (true) {
		struct rb_node *rbn;

		last = pos;

		if (kernfs_type(pos) != KERNFS_DIR)
			break;

		rbn = rb_first(&pos->dir.children);
		if (!rbn)
			break;

		pos = rb_to_kn(rbn);
	}

	return last;
}

/**
 * kernfs_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: kernfs_node whose descendants to walk
 *
 * Find the next descendant to visit for post-order traversal of @root's
 * descendants. @root is included in the iteration and the last node to be
 * visited.
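 *
 * A typical walk, as used by kernfs_activate() and __kernfs_remove() below,
 * looks roughly like:
 *
 *	pos = NULL;
 *	while ((pos = kernfs_next_descendant_post(pos, root)))
 *		visit(pos);
 *
 * where visit() stands in for the per-node work.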
 */
static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
						       struct kernfs_node *root)
{
	struct rb_node *rbn;

	lockdep_assert_held_write(&kernfs_root(root)->kernfs_rwsem);

	/* if first iteration, visit leftmost descendant which may be root */
	if (!pos)
		return kernfs_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	rbn = rb_next(&pos->rb);
	if (rbn)
		return kernfs_leftmost_descendant(rb_to_kn(rbn));

	/* no sibling left, visit parent */
	return pos->parent;
}

/**
 * kernfs_activate - activate a node which started deactivated
 * @kn: kernfs_node whose subtree is to be activated
 *
 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
 * needs to be explicitly activated. A node which hasn't been activated
 * isn't visible to userland and deactivation is skipped during its
 * removal. This is useful to construct atomic init sequences where
 * creation of multiple nodes should either succeed or fail atomically.
 *
 * The caller is responsible for ensuring that this function is not called
 * after kernfs_remove*() is invoked on @kn.
 */
void kernfs_activate(struct kernfs_node *kn)
{
	struct kernfs_node *pos;
	struct kernfs_root *root = kernfs_root(kn);

	down_write(&root->kernfs_rwsem);

	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn))) {
		if (pos->flags & KERNFS_ACTIVATED)
			continue;

		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
		WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);

		atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
		pos->flags |= KERNFS_ACTIVATED;
	}

	up_write(&root->kernfs_rwsem);
}

static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns. Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated. This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.
		 * Use it to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ktime_get_real_ts64(&ps_iattr->ia_ctime);
				ps_iattr->ia_mtime = ps_iattr->ia_ctime;
			}

			kernfs_put(pos);
		}

		kernfs_put(pos);
	} while (pos != kn);
}

/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_root *root;

	if (!kn)
		return;

	root = kernfs_root(kn);

	down_write(&root->kernfs_rwsem);
	__kernfs_remove(kn);
	up_write(&root->kernfs_rwsem);
}

/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops. Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding. Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain. If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}

/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation. Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or in the
 * process of being removed. Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything. If @kn
	 * is alive, nothing changes. If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state. If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}

/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops. This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself. This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself. It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self(). The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path. kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node. Only the first one actually performs removal and
 * returns %true. All others will wait until the kernfs operation which
 * won self-removal finishes and return %false. Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration. This can be used to
 * guarantee, for example, all concurrent writes to a "delete" file to
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;
	struct kernfs_root *root = kernfs_root(kn);

	down_write(&root->kernfs_rwsem);
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations. Only
	 * the first one will actually perform removal. When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while kernfs_rwsem is held exclusive. The ones which lost
	 * arbitration wait for SUICIDED && drained which can happen only
	 * after the enclosing kernfs operation which executed the winning
	 * instance of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			up_write(&root->kernfs_rwsem);
			schedule();
			down_write(&root->kernfs_rwsem);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while kernfs_rwsem is held exclusive; otherwise,
	 * waiting for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	up_write(&root->kernfs_rwsem);
	return ret;
}

/**
 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
 * @parent: parent of the target
 * @name: name of the kernfs_node to remove
 * @ns: namespace tag of the kernfs_node to remove
 *
 * Look for the kernfs_node with @name and @ns under @parent and remove it.
 * Returns 0 on success, -ENOENT if such entry doesn't exist.
 */
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
			     const void *ns)
{
	struct kernfs_node *kn;
	struct kernfs_root *root;

	if (!parent) {
		WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
			name);
		return -ENOENT;
	}

	root = kernfs_root(parent);
	down_write(&root->kernfs_rwsem);

	kn = kernfs_find_ns(parent, name, ns);
	if (kn)
		__kernfs_remove(kn);

	up_write(&root->kernfs_rwsem);

	if (kn)
		return 0;
	else
		return -ENOENT;
}

/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	struct kernfs_root *root;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	root = kernfs_root(kn);
	down_write(&root->kernfs_rwsem);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
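	 * The node is unlinked first, its ->parent and ->name are updated
	 * under kernfs_rename_lock, and it is then re-linked with a hash
	 * computed from the new name and namespace.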
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
 out:
	up_write(&root->kernfs_rwsem);
	return error;
}

/* Relationship between mode and the DT_xxx types */
static inline unsigned char dt_type(struct kernfs_node *kn)
{
	return (kn->mode >> 12) & 15;
}

static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}

static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}

static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
	struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
{
	pos = kernfs_dir_pos(ns, parent, ino, pos);
	if (pos) {
		do {
			struct rb_node *node = rb_next(&pos->rb);
			if (!node)
				pos = NULL;
			else
				pos = rb_to_kn(node);
		} while (pos && (!kernfs_active(pos) || pos->ns != ns));
	}
	return pos;
}

static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = kernfs_dentry_node(dentry);
	struct kernfs_node *pos = file->private_data;
	struct kernfs_root *root;
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;

	root = kernfs_root(parent);
	down_read(&root->kernfs_rwsem);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = kernfs_ino(pos);

		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		up_read(&root->kernfs_rwsem);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;
		down_read(&root->kernfs_rwsem);
	}
	up_read(&root->kernfs_rwsem);
	file->private_data = NULL;
	ctx->pos = INT_MAX;
	return 0;
}

const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= generic_file_llseek,
};
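
/*
 * Illustrative sketch (not part of this file's API surface): a minimal
 * kernfs consumer creates a hierarchy, adds a directory and tears it all
 * down roughly as below. "example_priv" is a hypothetical private pointer
 * used only for illustration.
 *
 *	struct kernfs_root *root;
 *	struct kernfs_node *dir;
 *
 *	root = kernfs_create_root(NULL, 0, example_priv);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	dir = kernfs_create_dir_ns(kernfs_root_to_node(root), "example",
 *				   S_IRWXU, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *				   example_priv, NULL);
 *	if (IS_ERR(dir)) {
 *		kernfs_destroy_root(root);
 *		return PTR_ERR(dir);
 *	}
 *
 *	...
 *	kernfs_destroy_root(root);	(also removes all remaining children)
 */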