// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dir.c - Operations for configfs directories.
 *
 * Based on sysfs:
 *	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
 *
 * configfs Copyright (C) 2005 Oracle.  All rights reserved.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>

#include <linux/configfs.h>
#include "configfs_internal.h"

/*
 * Protects mutations of configfs_dirent linkage together with proper i_mutex
 * Also protects mutations of symlinks linkage to target configfs_dirent
 * Mutators of configfs_dirent linkage must *both* have the proper inode locked
 * and configfs_dirent_lock locked, in that order.
 * This allows one to safely traverse configfs_dirent trees and symlinks without
 * having to lock inodes.
 *
 * Protects setting of CONFIGFS_USET_DROPPING: checking the flag
 * unlocked is not reliable unless in detach_groups() called from
 * rmdir()/unregister() and from configfs_attach_group()
 */
DEFINE_SPINLOCK(configfs_dirent_lock);

/*
 * All of link_obj/unlink_obj/link_group/unlink_group require that
 * subsys->su_mutex is held.
 * But parent configfs_subsystem is NULL when config_item is root.
 * Use this mutex when config_item is root.
 */
static DEFINE_MUTEX(configfs_subsystem_mutex);

static void configfs_d_iput(struct dentry *dentry,
			    struct inode *inode)
{
	struct configfs_dirent *sd = dentry->d_fsdata;

	if (sd) {
		/* Coordinate with configfs_readdir */
		spin_lock(&configfs_dirent_lock);
		/*
		 * Set sd->s_dentry to null only when this dentry is the one
		 * that is going to be killed.  Otherwise configfs_d_iput may
		 * run just after configfs_lookup and set sd->s_dentry to
		 * NULL even if it's still in use.
		 */
		if (sd->s_dentry == dentry)
			sd->s_dentry = NULL;

		spin_unlock(&configfs_dirent_lock);
		configfs_put(sd);
	}
	iput(inode);
}

const struct dentry_operations configfs_dentry_ops = {
	.d_iput		= configfs_d_iput,
	.d_delete	= always_delete_dentry,
};

#ifdef CONFIG_LOCKDEP

/*
 * Helpers to make lockdep happy with our recursive locking of default groups'
 * inodes (see configfs_attach_group() and configfs_detach_group()).
 * We put default groups i_mutexes in separate classes according to their depth
 * from the youngest non-default group ancestor.
 *
 * For a non-default group A having default groups A/B, A/C, and A/C/D, default
 * groups A/B and A/C will have their inode's mutex in class
 * default_group_class[0], and default group A/C/D will be in
 * default_group_class[1].
 *
 * The lock classes are declared and assigned in inode.c, according to the
 * s_depth value.
 * The s_depth value is initialized to -1, adjusted to >= 0 when attaching
 * default groups, and reset to -1 when all default groups are attached. During
 * attachment, if configfs_create() sees s_depth > 0, the lock class of the new
 * inode's mutex is set to default_group_class[s_depth - 1].
 */

static void configfs_init_dirent_depth(struct configfs_dirent *sd)
{
	sd->s_depth = -1;
}

static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
					   struct configfs_dirent *sd)
{
	int parent_depth = parent_sd->s_depth;

	if (parent_depth >= 0)
		sd->s_depth = parent_depth + 1;
}

static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
	/*
	 * item's i_mutex class is already setup, so s_depth is now only
	 * used to set new sub-directories s_depth, which is always done
	 * with item's i_mutex locked.
	 */
	/*
	 * sd->s_depth == -1 iff we are a non default group.
	 * else (we are a default group) sd->s_depth > 0 (see
	 * create_dir()).
	 */
	if (sd->s_depth == -1)
		/*
		 * We are a non default group and we are going to create
		 * default groups.
		 */
		sd->s_depth = 0;
}

static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
	/* We will not create default groups anymore. */
	sd->s_depth = -1;
}

#else /* CONFIG_LOCKDEP */

static void configfs_init_dirent_depth(struct configfs_dirent *sd)
{
}

static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
					   struct configfs_dirent *sd)
{
}

static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
}

static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
}

#endif /* CONFIG_LOCKDEP */

static struct configfs_fragment *new_fragment(void)
{
	struct configfs_fragment *p;

	p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
	if (p) {
		atomic_set(&p->frag_count, 1);
		init_rwsem(&p->frag_sem);
		p->frag_dead = false;
	}
	return p;
}

void put_fragment(struct configfs_fragment *frag)
{
	if (frag && atomic_dec_and_test(&frag->frag_count))
		kfree(frag);
}

struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
{
	if (likely(frag))
		atomic_inc(&frag->frag_count);
	return frag;
}

/*
 * Allocates a new configfs_dirent and links it to the parent configfs_dirent
 */
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
						   void *element, int type,
						   struct configfs_fragment *frag)
{
	struct configfs_dirent *sd;

	sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
	if (!sd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sd->s_count, 1);
	INIT_LIST_HEAD(&sd->s_children);
	sd->s_element = element;
	sd->s_type = type;
	configfs_init_dirent_depth(sd);
	spin_lock(&configfs_dirent_lock);
	if (parent_sd->s_type & CONFIGFS_USET_DROPPING) {
		spin_unlock(&configfs_dirent_lock);
		kmem_cache_free(configfs_dir_cachep, sd);
		return ERR_PTR(-ENOENT);
	}
	sd->s_frag = get_fragment(frag);
	list_add(&sd->s_sibling, &parent_sd->s_children);
	spin_unlock(&configfs_dirent_lock);

	return sd;
}

/*
 *
 * Return -EEXIST if there is already a configfs element with the same
 * name for the same parent.
 *
 * called with parent inode's i_mutex held
 */
static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
				  const unsigned char *new)
{
	struct configfs_dirent *sd;

	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
		if (sd->s_element) {
			const unsigned char *existing = configfs_get_name(sd);
			if (strcmp(existing, new))
				continue;
			else
				return -EEXIST;
		}
	}

	return 0;
}


int configfs_make_dirent(struct configfs_dirent *parent_sd,
			 struct dentry *dentry, void *element,
			 umode_t mode, int type, struct configfs_fragment *frag)
{
	struct configfs_dirent *sd;

	sd = configfs_new_dirent(parent_sd, element, type, frag);
	if (IS_ERR(sd))
		return PTR_ERR(sd);

	sd->s_mode = mode;
	sd->s_dentry = dentry;
	if (dentry)
		dentry->d_fsdata = configfs_get(sd);

	return 0;
}

static void configfs_remove_dirent(struct dentry *dentry)
{
	struct configfs_dirent *sd = dentry->d_fsdata;

	if (!sd)
		return;
	spin_lock(&configfs_dirent_lock);
	list_del_init(&sd->s_sibling);
	spin_unlock(&configfs_dirent_lock);
	configfs_put(sd);
}

/**
 * configfs_create_dir - create a directory for a config_item.
 * @item:	config_item we're creating directory for.
 * @dentry:	config_item's dentry.
 * @frag:	config_item's fragment.
 *
 * Note: user-created entries won't be allowed under this new directory
 * until it is validated by configfs_dir_set_ready()
 */

static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
			       struct configfs_fragment *frag)
{
	int error;
	umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
	struct dentry *p = dentry->d_parent;
	struct inode *inode;

	BUG_ON(!item);

	error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
	if (unlikely(error))
		return error;

	error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
				     CONFIGFS_DIR | CONFIGFS_USET_CREATING,
				     frag);
	if (unlikely(error))
		return error;

	configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
	inode = configfs_create(dentry, mode);
	if (IS_ERR(inode))
		goto out_remove;

	inode->i_op = &configfs_dir_inode_operations;
	inode->i_fop = &configfs_dir_operations;
	/* directory inodes start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	d_instantiate(dentry, inode);
	/* already hashed */
	dget(dentry);  /* pin directory dentries in core */
	inc_nlink(d_inode(p));
	item->ci_dentry = dentry;
	return 0;

out_remove:
	configfs_remove_dirent(dentry);
	return PTR_ERR(inode);
}

/*
 * Allow userspace to create new entries under a new directory created with
 * configfs_create_dir(), and under all of its children directories recursively.
 * @sd		configfs_dirent of the new directory to validate
 *
 * Caller must hold configfs_dirent_lock.
 */
static void configfs_dir_set_ready(struct configfs_dirent *sd)
{
	struct configfs_dirent *child_sd;

	sd->s_type &= ~CONFIGFS_USET_CREATING;
	list_for_each_entry(child_sd, &sd->s_children, s_sibling)
		if (child_sd->s_type & CONFIGFS_USET_CREATING)
			configfs_dir_set_ready(child_sd);
}

/*
 * Check that a directory does not belong to a directory hierarchy being
 * attached and not validated yet.
 * @sd		configfs_dirent of the directory to check
 *
 * @return non-zero iff the directory was validated
 *
 * Note: takes configfs_dirent_lock, so the result may change from false to true
 * in two consecutive calls, but never from true to false.
 */
int configfs_dirent_is_ready(struct configfs_dirent *sd)
{
	int ret;

	spin_lock(&configfs_dirent_lock);
	ret = !(sd->s_type & CONFIGFS_USET_CREATING);
	spin_unlock(&configfs_dirent_lock);

	return ret;
}

int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
			 struct dentry *dentry, char *body)
{
	int err = 0;
	umode_t mode = S_IFLNK | S_IRWXUGO;
	struct configfs_dirent *p = parent->d_fsdata;
	struct inode *inode;

	err = configfs_make_dirent(p, dentry, target, mode, CONFIGFS_ITEM_LINK,
				   p->s_frag);
	if (err)
		return err;

	inode = configfs_create(dentry, mode);
	if (IS_ERR(inode))
		goto out_remove;

	inode->i_link = body;
	inode->i_op = &configfs_symlink_inode_operations;
	d_instantiate(dentry, inode);
	dget(dentry);  /* pin link dentries in core */
	return 0;

out_remove:
	configfs_remove_dirent(dentry);
	return PTR_ERR(inode);
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	configfs_remove_dirent(d);

	if (d_really_is_positive(d))
		simple_rmdir(d_inode(parent), d);

	pr_debug(" o %pd removing done (%d)\n", d, d_count(d));

	dput(parent);
}

/**
 * configfs_remove_dir - remove a config_item's directory.
 * @item:	config_item we're removing.
 *
 * The only thing special about this is that we remove any files in
 * the directory before we remove the directory, and we've inlined
 * what used to be configfs_rmdir() below, instead of calling separately.
 *
 * Caller holds the mutex of the item's inode
 */

static void configfs_remove_dir(struct config_item *item)
{
	struct dentry *dentry = dget(item->ci_dentry);

	if (!dentry)
		return;

	remove_dir(dentry);
	/**
	 * Drop reference from dget() on entrance.
	 */
	dput(dentry);
}

static struct dentry *configfs_lookup(struct inode *dir,
				      struct dentry *dentry,
				      unsigned int flags)
{
	struct configfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
	struct configfs_dirent *sd;
	struct inode *inode = NULL;

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	/*
	 * Fake invisibility if dir belongs to a group/default groups hierarchy
	 * being attached
	 *
	 * This forbids userspace to read/write attributes of items which may
	 * not complete their initialization, since the dentries of the
	 * attributes won't be instantiated.
	 */
	if (!configfs_dirent_is_ready(parent_sd))
		return ERR_PTR(-ENOENT);

	spin_lock(&configfs_dirent_lock);
	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
		if ((sd->s_type & CONFIGFS_NOT_PINNED) &&
		    !strcmp(configfs_get_name(sd), dentry->d_name.name)) {
			struct configfs_attribute *attr = sd->s_element;
			umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;

			dentry->d_fsdata = configfs_get(sd);
			sd->s_dentry = dentry;
			spin_unlock(&configfs_dirent_lock);

			inode = configfs_create(dentry, mode);
			if (IS_ERR(inode)) {
				configfs_put(sd);
				return ERR_CAST(inode);
			}
			if (sd->s_type & CONFIGFS_ITEM_BIN_ATTR) {
				inode->i_size = 0;
				inode->i_fop = &configfs_bin_file_operations;
			} else {
				inode->i_size = PAGE_SIZE;
				inode->i_fop = &configfs_file_operations;
			}
			goto done;
		}
	}
	spin_unlock(&configfs_dirent_lock);
done:
	d_add(dentry, inode);
	return NULL;
}

/*
 * Only subdirectories count here.  Files (CONFIGFS_NOT_PINNED) are
 * attributes and are removed by rmdir().  We recurse, setting
 * CONFIGFS_USET_DROPPING on all children that are candidates for
 * default detach.
 * If there is an error, the caller will reset the flags via
 * configfs_detach_rollback().
 */
static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
{
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	struct configfs_dirent *sd;
	int ret;

	/* Mark that we're trying to drop the group */
	parent_sd->s_type |= CONFIGFS_USET_DROPPING;

	ret = -EBUSY;
	if (parent_sd->s_links)
		goto out;

	ret = 0;
	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
		if (!sd->s_element ||
		    (sd->s_type & CONFIGFS_NOT_PINNED))
			continue;
		if (sd->s_type & CONFIGFS_USET_DEFAULT) {
			/* Abort if racing with mkdir() */
			if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
				if (wait)
					*wait = dget(sd->s_dentry);
				return -EAGAIN;
			}

			/*
			 * Yup, recursive.  If there's a problem, blame
			 * deep nesting of default_groups
			 */
			ret = configfs_detach_prep(sd->s_dentry, wait);
			if (!ret)
				continue;
		} else
			ret = -ENOTEMPTY;

		break;
	}

out:
	return ret;
}

/*
 * Walk the tree, resetting CONFIGFS_USET_DROPPING wherever it was
 * set.
 */
static void configfs_detach_rollback(struct dentry *dentry)
{
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	struct configfs_dirent *sd;

	parent_sd->s_type &= ~CONFIGFS_USET_DROPPING;

	list_for_each_entry(sd, &parent_sd->s_children, s_sibling)
		if (sd->s_type & CONFIGFS_USET_DEFAULT)
			configfs_detach_rollback(sd->s_dentry);
}

static void detach_attrs(struct config_item *item)
{
	struct dentry *dentry = dget(item->ci_dentry);
	struct configfs_dirent *parent_sd;
	struct configfs_dirent *sd, *tmp;

	if (!dentry)
		return;

	pr_debug("configfs %s: dropping attrs for dir\n",
		 dentry->d_name.name);

	parent_sd = dentry->d_fsdata;
	list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
		if (!sd->s_element || !(sd->s_type & CONFIGFS_NOT_PINNED))
			continue;
		spin_lock(&configfs_dirent_lock);
		list_del_init(&sd->s_sibling);
		spin_unlock(&configfs_dirent_lock);
		configfs_drop_dentry(sd, dentry);
		configfs_put(sd);
	}

	/**
	 * Drop reference from dget() on entrance.
	 */
	dput(dentry);
}

static int populate_attrs(struct config_item *item)
{
	const struct config_item_type *t = item->ci_type;
	struct configfs_attribute *attr;
	struct configfs_bin_attribute *bin_attr;
	int error = 0;
	int i;

	if (!t)
		return -EINVAL;
	if (t->ct_attrs) {
		for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
			if ((error = configfs_create_file(item, attr)))
				break;
		}
	}
	if (t->ct_bin_attrs) {
		for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
			error = configfs_create_bin_file(item, bin_attr);
			if (error)
				break;
		}
	}

	if (error)
		detach_attrs(item);

	return error;
}
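
/*
 * Illustrative sketch (not part of this file; the "example_" names are
 * hypothetical): the ct_attrs array walked by populate_attrs() is normally
 * built by the client with the CONFIGFS_ATTR() helper, which assumes
 * example_enable_show()/example_enable_store() exist:
 *
 *	CONFIGFS_ATTR(example_, enable);
 *
 *	static struct configfs_attribute *example_attrs[] = {
 *		&example_attr_enable,
 *		NULL,
 *	};
 *
 * example_attrs[] is then wired up via config_item_type->ct_attrs.
 */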

static int configfs_attach_group(struct config_item *parent_item,
				 struct config_item *item,
				 struct dentry *dentry,
				 struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);

static void detach_groups(struct config_group *group)
{
	struct dentry *dentry = dget(group->cg_item.ci_dentry);
	struct dentry *child;
	struct configfs_dirent *parent_sd;
	struct configfs_dirent *sd, *tmp;

	if (!dentry)
		return;

	parent_sd = dentry->d_fsdata;
	list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
		if (!sd->s_element ||
		    !(sd->s_type & CONFIGFS_USET_DEFAULT))
			continue;

		child = sd->s_dentry;

		inode_lock(d_inode(child));

		configfs_detach_group(sd->s_element);
		d_inode(child)->i_flags |= S_DEAD;
		dont_mount(child);

		inode_unlock(d_inode(child));

		d_delete(child);
		dput(child);
	}

	/**
	 * Drop reference from dget() on entrance.
	 */
	dput(dentry);
}

/*
 * This fakes mkdir(2) on a default_groups[] entry.  It
 * creates a dentry, attaches it, and then does fixup
 * on the sd->s_type.
 *
 * We could, perhaps, tweak our parent's ->mkdir for a minute and
 * try using vfs_mkdir.  Just a thought.
 */
static int create_default_group(struct config_group *parent_group,
				struct config_group *group,
				struct configfs_fragment *frag)
{
	int ret;
	struct configfs_dirent *sd;
	/* We trust the caller holds a reference to parent */
	struct dentry *child, *parent = parent_group->cg_item.ci_dentry;

	if (!group->cg_item.ci_name)
		group->cg_item.ci_name = group->cg_item.ci_namebuf;

	ret = -ENOMEM;
	child = d_alloc_name(parent, group->cg_item.ci_name);
	if (child) {
		d_add(child, NULL);

		ret = configfs_attach_group(&parent_group->cg_item,
					    &group->cg_item, child, frag);
		if (!ret) {
			sd = child->d_fsdata;
			sd->s_type |= CONFIGFS_USET_DEFAULT;
		} else {
			BUG_ON(d_inode(child));
			d_drop(child);
			dput(child);
		}
	}

	return ret;
}

static int populate_groups(struct config_group *group,
			   struct configfs_fragment *frag)
{
	struct config_group *new_group;
	int ret = 0;

	list_for_each_entry(new_group, &group->default_groups, group_entry) {
		ret = create_default_group(group, new_group, frag);
		if (ret) {
			detach_groups(group);
			break;
		}
	}

	return ret;
}

void configfs_remove_default_groups(struct config_group *group)
{
	struct config_group *g, *n;

	list_for_each_entry_safe(g, n, &group->default_groups, group_entry) {
		list_del(&g->group_entry);
		config_item_put(&g->cg_item);
	}
}
EXPORT_SYMBOL(configfs_remove_default_groups);

/*
 * All of link_obj/unlink_obj/link_group/unlink_group require that
 * subsys->su_mutex is held.
 */

static void unlink_obj(struct config_item *item)
{
	struct config_group *group;

	group = item->ci_group;
	if (group) {
		list_del_init(&item->ci_entry);

		item->ci_group = NULL;
		item->ci_parent = NULL;

		/* Drop the reference for ci_entry */
		config_item_put(item);

		/* Drop the reference for ci_parent */
		config_group_put(group);
	}
}

static void link_obj(struct config_item *parent_item, struct config_item *item)
{
	/*
	 * Parent seems redundant with group, but it makes certain
	 * traversals much nicer.
	 */
	item->ci_parent = parent_item;

	/*
	 * We hold a reference on the parent for the child's ci_parent
	 * link.
	 */
	item->ci_group = config_group_get(to_config_group(parent_item));
	list_add_tail(&item->ci_entry, &item->ci_group->cg_children);

	/*
	 * We hold a reference on the child for ci_entry on the parent's
	 * cg_children
	 */
	config_item_get(item);
}

static void unlink_group(struct config_group *group)
{
	struct config_group *new_group;

	list_for_each_entry(new_group, &group->default_groups, group_entry)
		unlink_group(new_group);

	group->cg_subsys = NULL;
	unlink_obj(&group->cg_item);
}

static void link_group(struct config_group *parent_group, struct config_group *group)
{
	struct config_group *new_group;
	struct configfs_subsystem *subsys = NULL; /* gcc is a turd */

	link_obj(&parent_group->cg_item, &group->cg_item);

	if (parent_group->cg_subsys)
		subsys = parent_group->cg_subsys;
	else if (configfs_is_root(&parent_group->cg_item))
		subsys = to_configfs_subsystem(group);
	else
		BUG();
	group->cg_subsys = subsys;

	list_for_each_entry(new_group, &group->default_groups, group_entry)
		link_group(group, new_group);
}

/*
 * The goal is that configfs_attach_item() (and
 * configfs_attach_group()) can be called from either the VFS or this
 * module.  That is, they assume that the items have been created,
 * the dentry allocated, and the dcache is all ready to go.
 *
 * If they fail, they must clean up after themselves as if they
 * had never been called.  The caller (VFS or local function) will
 * handle cleaning up the dcache bits.
 *
 * configfs_detach_group() and configfs_detach_item() behave similarly on
 * the way out.  They assume that the proper semaphores are held, they
 * clean up the configfs items, and they expect their callers will
 * handle the dcache bits.
 */
static int configfs_attach_item(struct config_item *parent_item,
				struct config_item *item,
				struct dentry *dentry,
				struct configfs_fragment *frag)
{
	int ret;

	ret = configfs_create_dir(item, dentry, frag);
	if (!ret) {
		ret = populate_attrs(item);
		if (ret) {
			/*
			 * We are going to remove an inode and its dentry but
			 * the VFS may already have hit and used them. Thus,
			 * we must lock them as rmdir() would.
			 */
			inode_lock(d_inode(dentry));
			configfs_remove_dir(item);
			d_inode(dentry)->i_flags |= S_DEAD;
			dont_mount(dentry);
			inode_unlock(d_inode(dentry));
			d_delete(dentry);
		}
	}

	return ret;
}

/* Caller holds the mutex of the item's inode */
static void configfs_detach_item(struct config_item *item)
{
	detach_attrs(item);
	configfs_remove_dir(item);
}

static int configfs_attach_group(struct config_item *parent_item,
				 struct config_item *item,
				 struct dentry *dentry,
				 struct configfs_fragment *frag)
{
	int ret;
	struct configfs_dirent *sd;

	ret = configfs_attach_item(parent_item, item, dentry, frag);
	if (!ret) {
		sd = dentry->d_fsdata;
		sd->s_type |= CONFIGFS_USET_DIR;

		/*
		 * FYI, we're faking mkdir in populate_groups()
		 * We must lock the group's inode to avoid races with the VFS
		 * which can already hit the inode and try to add/remove entries
		 * under it.
		 *
		 * We must also lock the inode to remove it safely in case of
		 * error, as rmdir() would.
		 */
		inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
		configfs_adjust_dir_dirent_depth_before_populate(sd);
		ret = populate_groups(to_config_group(item), frag);
		if (ret) {
			configfs_detach_item(item);
			d_inode(dentry)->i_flags |= S_DEAD;
			dont_mount(dentry);
		}
		configfs_adjust_dir_dirent_depth_after_populate(sd);
		inode_unlock(d_inode(dentry));
		if (ret)
			d_delete(dentry);
	}

	return ret;
}

/* Caller holds the mutex of the group's inode */
static void configfs_detach_group(struct config_item *item)
{
	detach_groups(to_config_group(item));
	configfs_detach_item(item);
}

/*
 * After the item has been detached from the filesystem view, we are
 * ready to tear it out of the hierarchy.  Notify the client before
 * we do that so they can perform any cleanup that requires
 * navigating the hierarchy.  A client does not need to provide this
 * callback.  The subsystem semaphore MUST be held by the caller, and
 * references must be valid for both items.  It also assumes the
 * caller has validated ci_type.
 */
static void client_disconnect_notify(struct config_item *parent_item,
				     struct config_item *item)
{
	const struct config_item_type *type;

	type = parent_item->ci_type;
	BUG_ON(!type);

	if (type->ct_group_ops && type->ct_group_ops->disconnect_notify)
		type->ct_group_ops->disconnect_notify(to_config_group(parent_item),
						      item);
}

/*
 * Drop the initial reference from make_item()/make_group()
 * This function assumes that reference is held on item
 * and that item holds a valid reference to the parent.  Also, it
 * assumes the caller has validated ci_type.
 */
static void client_drop_item(struct config_item *parent_item,
			     struct config_item *item)
{
	const struct config_item_type *type;

	type = parent_item->ci_type;
	BUG_ON(!type);

	/*
	 * If ->drop_item() exists, it is responsible for the
	 * config_item_put().
	 */
	if (type->ct_group_ops && type->ct_group_ops->drop_item)
		type->ct_group_ops->drop_item(to_config_group(parent_item),
					      item);
	else
		config_item_put(item);
}

#ifdef DEBUG
static void configfs_dump_one(struct configfs_dirent *sd, int level)
{
	pr_info("%*s\"%s\":\n", level, " ", configfs_get_name(sd));

#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type);
	type_print(CONFIGFS_ROOT);
	type_print(CONFIGFS_DIR);
	type_print(CONFIGFS_ITEM_ATTR);
	type_print(CONFIGFS_ITEM_LINK);
	type_print(CONFIGFS_USET_DIR);
	type_print(CONFIGFS_USET_DEFAULT);
	type_print(CONFIGFS_USET_DROPPING);
#undef type_print
}

static int configfs_dump(struct configfs_dirent *sd, int level)
{
	struct configfs_dirent *child_sd;
	int ret = 0;

	configfs_dump_one(sd, level);

	if (!(sd->s_type & (CONFIGFS_DIR|CONFIGFS_ROOT)))
		return 0;

	list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
		ret = configfs_dump(child_sd, level + 2);
		if (ret)
			break;
	}

	return ret;
}
#endif


/*
 * configfs_depend_item() and configfs_undepend_item()
 *
 * WARNING: Do not call these from a configfs callback!
 *
 * This describes these functions and their helpers.
 *
 * Allow another kernel system to depend on a config_item.  If this
 * happens, the item cannot go away until the dependent can live without
 * it.  The idea is to give client modules as simple an interface as
 * possible.  When a system asks them to depend on an item, they just
 * call configfs_depend_item().  If the item is live and the client
 * driver is in good shape, we'll happily do the work for them.
 *
 * Why is the locking complex?  Because configfs uses the VFS to handle
 * all locking, but this function is called outside the normal
 * VFS->configfs path.  So it must take VFS locks to prevent the
 * VFS->configfs stuff (configfs_mkdir(), configfs_rmdir(), etc).  This is
 * why you can't call these functions underneath configfs callbacks.
 *
 * Note, btw, that this can be called at *any* time, even when a configfs
 * subsystem isn't registered, or when configfs is loading or unloading.
 * Just like configfs_register_subsystem().  So we take the same
 * precautions.  We pin the filesystem.  We lock configfs_dirent_lock.
 * If we can find the target item in the
 * configfs tree, it must be part of the subsystem tree as well, so we
 * do not need the subsystem semaphore.  Holding configfs_dirent_lock helps
 * locking out mkdir() and rmdir(), who might be racing us.
 */
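
/*
 * Illustrative sketch (not part of this file; "example_subsys" and "item"
 * are hypothetical): a kernel-side user that must keep an item alive does
 *
 *	ret = configfs_depend_item(&example_subsys, item);
 *	if (ret)
 *		return ret;
 *	...use whatever the item describes...
 *	configfs_undepend_item(item);
 *
 * While the dependency is held, rmdir(2) on the item's directory fails with
 * -EBUSY (see the s_dependent_count check in configfs_rmdir() below).
 */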

/*
 * configfs_depend_prep()
 *
 * Only subdirectories count here.  Files (CONFIGFS_NOT_PINNED) are
 * attributes.  This is similar to, but not the same as, configfs_detach_prep().
 * Note that configfs_detach_prep() expects the parent to be locked when it
 * is called, but we lock the parent *inside* configfs_depend_prep().  We
 * do that so we can unlock it if we find nothing.
 *
 * Here we do a depth-first search of the dentry hierarchy looking for
 * our object.
 * We deliberately ignore items tagged as dropping since they are virtually
 * dead, as well as items in the middle of attachment since they virtually
 * do not exist yet. This completes the locking out of racing mkdir() and
 * rmdir().
 * Note: subdirectories in the middle of attachment start with s_type =
 * CONFIGFS_DIR|CONFIGFS_USET_CREATING set by create_dir().  When
 * CONFIGFS_USET_CREATING is set, we ignore the item.  The actual set of
 * s_type is in configfs_new_dirent(), which has configfs_dirent_lock.
 *
 * If the target is not found, -ENOENT is bubbled up.
 *
 * This adds a requirement that all config_items be unique!
 *
 * This is recursive.  There isn't
 * much on the stack, though, so folks that need this function - be careful
 * about your stack!  Patches will be accepted to make it iterative.
 */
static int configfs_depend_prep(struct dentry *origin,
				struct config_item *target)
{
	struct configfs_dirent *child_sd, *sd;
	int ret = 0;

	BUG_ON(!origin || !origin->d_fsdata);
	sd = origin->d_fsdata;

	if (sd->s_element == target)	/* Boo-yah */
		goto out;

	list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
		if ((child_sd->s_type & CONFIGFS_DIR) &&
		    !(child_sd->s_type & CONFIGFS_USET_DROPPING) &&
		    !(child_sd->s_type & CONFIGFS_USET_CREATING)) {
			ret = configfs_depend_prep(child_sd->s_dentry,
						   target);
			if (!ret)
				goto out;	/* Child path boo-yah */
		}
	}

	/* We looped all our children and didn't find target */
	ret = -ENOENT;

out:
	return ret;
}

static int configfs_do_depend_item(struct dentry *subsys_dentry,
				   struct config_item *target)
{
	struct configfs_dirent *p;
	int ret;

	spin_lock(&configfs_dirent_lock);
	/* Scan the tree, return 0 if found */
	ret = configfs_depend_prep(subsys_dentry, target);
	if (ret)
		goto out_unlock_dirent_lock;

	/*
	 * We are sure that the item is not about to be removed by rmdir(), and
	 * not in the middle of attachment by mkdir().
	 */
	p = target->ci_dentry->d_fsdata;
	p->s_dependent_count += 1;

out_unlock_dirent_lock:
	spin_unlock(&configfs_dirent_lock);

	return ret;
}

static inline struct configfs_dirent *
configfs_find_subsys_dentry(struct configfs_dirent *root_sd,
			    struct config_item *subsys_item)
{
	struct configfs_dirent *p;
	struct configfs_dirent *ret = NULL;

	list_for_each_entry(p, &root_sd->s_children, s_sibling) {
		if (p->s_type & CONFIGFS_DIR &&
		    p->s_element == subsys_item) {
			ret = p;
			break;
		}
	}

	return ret;
}


int configfs_depend_item(struct configfs_subsystem *subsys,
			 struct config_item *target)
{
	int ret;
	struct configfs_dirent *subsys_sd;
	struct config_item *s_item = &subsys->su_group.cg_item;
	struct dentry *root;

	/*
	 * Pin the configfs filesystem.  This means we can safely access
	 * the root of the configfs filesystem.
	 */
	root = configfs_pin_fs();
	if (IS_ERR(root))
		return PTR_ERR(root);

	/*
	 * Next, lock the root directory.  We're going to check that the
	 * subsystem is really registered, and so we need to lock out
	 * configfs_[un]register_subsystem().
	 */
	inode_lock(d_inode(root));

	subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item);
	if (!subsys_sd) {
		ret = -ENOENT;
		goto out_unlock_fs;
	}

	/* Ok, now we can trust subsys/s_item */
	ret = configfs_do_depend_item(subsys_sd->s_dentry, target);

out_unlock_fs:
	inode_unlock(d_inode(root));

	/*
	 * If we succeeded, the fs is pinned via other methods.  If not,
	 * we're done with it anyway.  So release_fs() is always right.
	 */
	configfs_release_fs();

	return ret;
}
EXPORT_SYMBOL(configfs_depend_item);

/*
 * Release the dependent linkage.  This is much simpler than
 * configfs_depend_item() because we know that the client driver is
 * pinned, thus the subsystem is pinned, and therefore configfs is pinned.
 */
void configfs_undepend_item(struct config_item *target)
{
	struct configfs_dirent *sd;

	/*
	 * Since we can trust everything is pinned, we just need
	 * configfs_dirent_lock.
	 */
	spin_lock(&configfs_dirent_lock);

	sd = target->ci_dentry->d_fsdata;
	BUG_ON(sd->s_dependent_count < 1);

	sd->s_dependent_count -= 1;

	/*
	 * After this unlock, we cannot trust the item to stay alive!
	 * DO NOT REFERENCE item after this unlock.
	 */
	spin_unlock(&configfs_dirent_lock);
}
EXPORT_SYMBOL(configfs_undepend_item);

/*
 * caller_subsys is the caller's subsystem, not the target's.  This is used to
 * determine if we should lock root and check subsys or not.  When we are
 * in the same subsystem as our target there is no need to do locking as
 * we know that subsys is valid and is not unregistered during this function,
 * as we are called from a callback of one of its children and VFS holds a lock
 * on some inode.  Otherwise we have to lock our root to ensure that the
 * target's subsystem is not unregistered during this function.
 */
int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
				  struct config_item *target)
{
	struct configfs_subsystem *target_subsys;
	struct config_group *root, *parent;
	struct configfs_dirent *subsys_sd;
	int ret = -ENOENT;

	/* Disallow this function for configfs root */
	if (configfs_is_root(target))
		return -EINVAL;

	parent = target->ci_group;
	/*
	 * This may happen when someone is trying to depend on the root
	 * directory of some subsystem
	 */
	if (configfs_is_root(&parent->cg_item)) {
		target_subsys = to_configfs_subsystem(to_config_group(target));
		root = parent;
	} else {
		target_subsys = parent->cg_subsys;
		/* Find a configfs root as we may need it for locking */
		for (root = parent; !configfs_is_root(&root->cg_item);
		     root = root->cg_item.ci_group)
			;
	}

	if (target_subsys != caller_subsys) {
		/*
		 * We are in another configfs subsystem, so we have to do
		 * additional locking to prevent the other subsystem from
		 * being unregistered
		 */
		inode_lock(d_inode(root->cg_item.ci_dentry));

		/*
		 * As we are trying to depend on an item from another subsystem
		 * we have to check if this subsystem is still registered
		 */
		subsys_sd = configfs_find_subsys_dentry(
				root->cg_item.ci_dentry->d_fsdata,
				&target_subsys->su_group.cg_item);
		if (!subsys_sd)
			goto out_root_unlock;
	} else {
		subsys_sd = target_subsys->su_group.cg_item.ci_dentry->d_fsdata;
	}

	/* Now we can execute core of depend item */
	ret = configfs_do_depend_item(subsys_sd->s_dentry, target);

	if (target_subsys != caller_subsys)
out_root_unlock:
		/*
		 * We were called from a subsystem other than our target's, so
		 * we took some locks; now it's time to release them
		 */
		inode_unlock(d_inode(root->cg_item.ci_dentry));

	return ret;
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);

static int configfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			  struct dentry *dentry, umode_t mode)
{
	int ret = 0;
	int module_got = 0;
	struct config_group *group = NULL;
	struct config_item *item = NULL;
	struct config_item *parent_item;
	struct configfs_subsystem *subsys;
	struct configfs_dirent *sd;
	const struct config_item_type *type;
	struct module *subsys_owner = NULL, *new_item_owner = NULL;
	struct configfs_fragment *frag;
	char *name;

	sd = dentry->d_parent->d_fsdata;

	/*
	 * Fake invisibility if dir belongs to a group/default groups hierarchy
	 * being attached
	 */
	if (!configfs_dirent_is_ready(sd)) {
		ret = -ENOENT;
		goto out;
	}

	if (!(sd->s_type & CONFIGFS_USET_DIR)) {
		ret = -EPERM;
		goto out;
	}

	frag = new_fragment();
	if (!frag) {
		ret = -ENOMEM;
		goto out;
	}

	/* Get a working ref for the duration of this function */
	parent_item = configfs_get_config_item(dentry->d_parent);
	type = parent_item->ci_type;
	subsys = to_config_group(parent_item)->cg_subsys;
	BUG_ON(!subsys);

	if (!type || !type->ct_group_ops ||
	    (!type->ct_group_ops->make_group &&
	     !type->ct_group_ops->make_item)) {
		ret = -EPERM;  /* Lack-of-mkdir returns -EPERM */
		goto out_put;
	}

	/*
	 * The subsystem may belong to a different module than the item
	 * being created.  We don't want to safely pin the new item but
	 * fail to pin the subsystem it sits under.
	 */
	if (!subsys->su_group.cg_item.ci_type) {
		ret = -EINVAL;
		goto out_put;
	}
	subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;
	if (!try_module_get(subsys_owner)) {
		ret = -EINVAL;
		goto out_put;
	}

	name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto out_subsys_put;
	}

	snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);

	mutex_lock(&subsys->su_mutex);
	if (type->ct_group_ops->make_group) {
		group = type->ct_group_ops->make_group(to_config_group(parent_item), name);
		if (!group)
			group = ERR_PTR(-ENOMEM);
		if (!IS_ERR(group)) {
			link_group(to_config_group(parent_item), group);
			item = &group->cg_item;
		} else
			ret = PTR_ERR(group);
	} else {
		item = type->ct_group_ops->make_item(to_config_group(parent_item), name);
		if (!item)
			item = ERR_PTR(-ENOMEM);
		if (!IS_ERR(item))
			link_obj(parent_item, item);
		else
			ret = PTR_ERR(item);
	}
	mutex_unlock(&subsys->su_mutex);

	kfree(name);
	if (ret) {
		/*
		 * If ret != 0, then link_obj() was never called.
		 * There are no extra references to clean up.
		 */
		goto out_subsys_put;
	}

	/*
	 * link_obj() has been called (via link_group() for groups).
	 * From here on out, errors must clean that up.
	 */

	type = item->ci_type;
	if (!type) {
		ret = -EINVAL;
		goto out_unlink;
	}

	new_item_owner = type->ct_owner;
	if (!try_module_get(new_item_owner)) {
		ret = -EINVAL;
		goto out_unlink;
	}

	/*
	 * I hate doing it this way, but if there is
	 * an error, module_put() probably should
	 * happen after any cleanup.
	 */
	module_got = 1;

	/*
	 * Make racing rmdir() fail if it did not tag parent with
	 * CONFIGFS_USET_DROPPING
	 * Note: if CONFIGFS_USET_DROPPING is already set, attach_group() will
	 * fail and let rmdir() terminate correctly
	 */
	spin_lock(&configfs_dirent_lock);
	/* This will make configfs_detach_prep() fail */
	sd->s_type |= CONFIGFS_USET_IN_MKDIR;
	spin_unlock(&configfs_dirent_lock);

	if (group)
		ret = configfs_attach_group(parent_item, item, dentry, frag);
	else
		ret = configfs_attach_item(parent_item, item, dentry, frag);

	spin_lock(&configfs_dirent_lock);
	sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
	if (!ret)
		configfs_dir_set_ready(dentry->d_fsdata);
	spin_unlock(&configfs_dirent_lock);

out_unlink:
	if (ret) {
		/* Tear down everything we built up */
		mutex_lock(&subsys->su_mutex);

		client_disconnect_notify(parent_item, item);
		if (group)
			unlink_group(group);
		else
			unlink_obj(item);
		client_drop_item(parent_item, item);

		mutex_unlock(&subsys->su_mutex);

		if (module_got)
			module_put(new_item_owner);
	}

out_subsys_put:
	if (ret)
		module_put(subsys_owner);

out_put:
	/*
	 * link_obj()/link_group() took a reference from child->parent,
	 * so the parent is safely pinned.  We can drop our working
	 * reference.
	 */
	config_item_put(parent_item);
	put_fragment(frag);

out:
	return ret;
}

static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct config_item *parent_item;
	struct config_item *item;
	struct configfs_subsystem *subsys;
	struct configfs_dirent *sd;
	struct configfs_fragment *frag;
	struct module *subsys_owner = NULL, *dead_item_owner = NULL;
	int ret;

	sd = dentry->d_fsdata;
	if (sd->s_type & CONFIGFS_USET_DEFAULT)
		return -EPERM;

	/* Get a working ref until we have the child */
	parent_item = configfs_get_config_item(dentry->d_parent);
	subsys = to_config_group(parent_item)->cg_subsys;
	BUG_ON(!subsys);

	if (!parent_item->ci_type) {
		config_item_put(parent_item);
		return -EINVAL;
	}

	/* configfs_mkdir() shouldn't have allowed this */
	BUG_ON(!subsys->su_group.cg_item.ci_type);
	subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;

	/*
	 * Ensure that no racing symlink() will make detach_prep() fail while
	 * the new link is temporarily attached
	 */
	do {
		struct dentry *wait;

		mutex_lock(&configfs_symlink_mutex);
		spin_lock(&configfs_dirent_lock);
		/*
		 * Here's where we check for dependents.  We're protected by
		 * configfs_dirent_lock.
		 * If no dependent, atomically tag the item as dropping.
		 */
		ret = sd->s_dependent_count ? -EBUSY : 0;
		if (!ret) {
			ret = configfs_detach_prep(dentry, &wait);
			if (ret)
				configfs_detach_rollback(dentry);
		}
		spin_unlock(&configfs_dirent_lock);
		mutex_unlock(&configfs_symlink_mutex);

		if (ret) {
			if (ret != -EAGAIN) {
				config_item_put(parent_item);
				return ret;
			}

			/* Wait until the racing operation terminates */
			inode_lock(d_inode(wait));
			inode_unlock(d_inode(wait));
			dput(wait);
		}
	} while (ret == -EAGAIN);

	frag = sd->s_frag;
	if (down_write_killable(&frag->frag_sem)) {
		spin_lock(&configfs_dirent_lock);
		configfs_detach_rollback(dentry);
		spin_unlock(&configfs_dirent_lock);
		config_item_put(parent_item);
		return -EINTR;
	}
	frag->frag_dead = true;
	up_write(&frag->frag_sem);

	/* Get a working ref for the duration of this function */
	item = configfs_get_config_item(dentry);

	/* Drop reference from above, item already holds one. */
	config_item_put(parent_item);

	if (item->ci_type)
		dead_item_owner = item->ci_type->ct_owner;

	if (sd->s_type & CONFIGFS_USET_DIR) {
		configfs_detach_group(item);

		mutex_lock(&subsys->su_mutex);
		client_disconnect_notify(parent_item, item);
		unlink_group(to_config_group(item));
	} else {
		configfs_detach_item(item);

		mutex_lock(&subsys->su_mutex);
		client_disconnect_notify(parent_item, item);
		unlink_obj(item);
	}

	client_drop_item(parent_item, item);
	mutex_unlock(&subsys->su_mutex);

	/* Drop our reference from above */
	config_item_put(item);

	module_put(dead_item_owner);
	module_put(subsys_owner);

	return 0;
}

const struct inode_operations configfs_dir_inode_operations = {
	.mkdir		= configfs_mkdir,
	.rmdir		= configfs_rmdir,
	.symlink	= configfs_symlink,
	.unlink		= configfs_unlink,
	.lookup		= configfs_lookup,
	.setattr	= configfs_setattr,
};

const struct inode_operations configfs_root_inode_operations = {
	.lookup		= configfs_lookup,
	.setattr	= configfs_setattr,
};

static int configfs_dir_open(struct inode *inode, struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	int err;

	inode_lock(d_inode(dentry));
	/*
	 * Fake invisibility if dir belongs to a group/default groups hierarchy
	 * being attached
	 */
	err = -ENOENT;
	if (configfs_dirent_is_ready(parent_sd)) {
		file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
		if (IS_ERR(file->private_data))
			err = PTR_ERR(file->private_data);
		else
			err = 0;
	}
	inode_unlock(d_inode(dentry));

	return err;
}

static int configfs_dir_close(struct inode *inode, struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct configfs_dirent *cursor = file->private_data;

	inode_lock(d_inode(dentry));
	spin_lock(&configfs_dirent_lock);
	list_del_init(&cursor->s_sibling);
	spin_unlock(&configfs_dirent_lock);
	inode_unlock(d_inode(dentry));

	release_configfs_dirent(cursor);

	return 0;
}

/* Relationship between s_mode and the DT_xxx types */
static inline unsigned char dt_type(struct configfs_dirent *sd)
{
	return (sd->s_mode >> 12) & 15;
}
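
/*
 * For example, a directory's s_mode contains S_IFDIR (0040000), and
 * 0040000 >> 12 == 4 == DT_DIR; likewise S_IFREG yields DT_REG (8) and
 * S_IFLNK yields DT_LNK (10).
 */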

static int configfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct super_block *sb = dentry->d_sb;
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	struct configfs_dirent *cursor = file->private_data;
	struct list_head *p, *q = &cursor->s_sibling;
	ino_t ino = 0;

	if (!dir_emit_dots(file, ctx))
		return 0;
	spin_lock(&configfs_dirent_lock);
	if (ctx->pos == 2)
		list_move(q, &parent_sd->s_children);
	for (p = q->next; p != &parent_sd->s_children; p = p->next) {
		struct configfs_dirent *next;
		const char *name;
		int len;
		struct inode *inode = NULL;

		next = list_entry(p, struct configfs_dirent, s_sibling);
		if (!next->s_element)
			continue;

		/*
		 * We'll have a dentry and an inode for
		 * PINNED items and for open attribute
		 * files.  We lock here to prevent a race
		 * with configfs_d_iput() clearing
		 * s_dentry before calling iput().
		 *
		 * Why do we go to the trouble?  If
		 * someone has an attribute file open,
		 * the inode number should match until
		 * they close it.  Beyond that, we don't
		 * care.
		 */
		dentry = next->s_dentry;
		if (dentry)
			inode = d_inode(dentry);
		if (inode)
			ino = inode->i_ino;
		spin_unlock(&configfs_dirent_lock);
		if (!inode)
			ino = iunique(sb, 2);

		name = configfs_get_name(next);
		len = strlen(name);

		if (!dir_emit(ctx, name, len, ino, dt_type(next)))
			return 0;

		spin_lock(&configfs_dirent_lock);
		list_move(q, p);
		p = q;
		ctx->pos++;
	}
	spin_unlock(&configfs_dirent_lock);
	return 0;
}

static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
{
	struct dentry *dentry = file->f_path.dentry;

	switch (whence) {
	case 1:
		offset += file->f_pos;
		fallthrough;
	case 0:
		if (offset >= 0)
			break;
		fallthrough;
	default:
		return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct configfs_dirent *sd = dentry->d_fsdata;
			struct configfs_dirent *cursor = file->private_data;
			struct list_head *p;
			loff_t n = file->f_pos - 2;

			spin_lock(&configfs_dirent_lock);
			list_del(&cursor->s_sibling);
			p = sd->s_children.next;
			while (n && p != &sd->s_children) {
				struct configfs_dirent *next;
				next = list_entry(p, struct configfs_dirent,
						  s_sibling);
				if (next->s_element)
					n--;
				p = p->next;
			}
			list_add_tail(&cursor->s_sibling, p);
			spin_unlock(&configfs_dirent_lock);
		}
	}
	return offset;
}

const struct file_operations configfs_dir_operations = {
	.open		= configfs_dir_open,
	.release	= configfs_dir_close,
	.llseek		= configfs_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= configfs_readdir,
};

/**
 * configfs_register_group - creates a parent-child relation between two groups
 * @parent_group: parent group
 * @group: child group
 *
 * link groups, creates dentry for the child and attaches it to the
 * parent dentry.
 *
 * Return: 0 on success, negative errno code on error
 */
int configfs_register_group(struct config_group *parent_group,
			    struct config_group *group)
{
	struct configfs_subsystem *subsys = parent_group->cg_subsys;
	struct dentry *parent;
	struct configfs_fragment *frag;
	int ret;

	frag = new_fragment();
	if (!frag)
		return -ENOMEM;

	mutex_lock(&subsys->su_mutex);
	link_group(parent_group, group);
	mutex_unlock(&subsys->su_mutex);

	parent = parent_group->cg_item.ci_dentry;

	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
	ret = create_default_group(parent_group, group, frag);
	if (ret)
		goto err_out;

	spin_lock(&configfs_dirent_lock);
	configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
	spin_unlock(&configfs_dirent_lock);
	inode_unlock(d_inode(parent));
	put_fragment(frag);
	return 0;
err_out:
	inode_unlock(d_inode(parent));
	mutex_lock(&subsys->su_mutex);
	unlink_group(group);
	mutex_unlock(&subsys->su_mutex);
	put_fragment(frag);
	return ret;
}
EXPORT_SYMBOL(configfs_register_group);

/**
 * configfs_unregister_group() - unregisters a child group from its parent
 * @group: child group to be unregistered
 *
 * Undoes configfs_register_group()
 */
void configfs_unregister_group(struct config_group *group)
{
	struct configfs_subsystem *subsys = group->cg_subsys;
	struct dentry *dentry = group->cg_item.ci_dentry;
	struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
	struct configfs_dirent *sd = dentry->d_fsdata;
	struct configfs_fragment *frag = sd->s_frag;

	down_write(&frag->frag_sem);
	frag->frag_dead = true;
	up_write(&frag->frag_sem);

	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
	spin_lock(&configfs_dirent_lock);
	configfs_detach_prep(dentry, NULL);
	spin_unlock(&configfs_dirent_lock);

	configfs_detach_group(&group->cg_item);
	d_inode(dentry)->i_flags |= S_DEAD;
	dont_mount(dentry);
	d_drop(dentry);
	fsnotify_rmdir(d_inode(parent), dentry);
	inode_unlock(d_inode(parent));

	dput(dentry);

	mutex_lock(&subsys->su_mutex);
	unlink_group(group);
	mutex_unlock(&subsys->su_mutex);
}
EXPORT_SYMBOL(configfs_unregister_group);

/**
 * configfs_register_default_group() - allocates and registers a child group
 * @parent_group:	parent group
 * @name:		child group name
 * @item_type:		child item type description
 *
 * boilerplate to allocate and register a child group with its parent. We need
 * kzalloc'ed memory because child's default_group is initially empty.
 *
 * Return: allocated config group or ERR_PTR() on error
 */
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
				const char *name,
				const struct config_item_type *item_type)
{
	int ret;
	struct config_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);
	config_group_init_type_name(group, name, item_type);

	ret = configfs_register_group(parent_group, group);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}
	return group;
}
EXPORT_SYMBOL(configfs_register_default_group);

/**
 * configfs_unregister_default_group() - unregisters and frees a child group
 * @group: the group to act on
 */
void configfs_unregister_default_group(struct config_group *group)
{
	configfs_unregister_group(group);
	kfree(group);
}
EXPORT_SYMBOL(configfs_unregister_default_group);
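
/*
 * Illustrative sketch (not part of this file; the "stats" names are
 * hypothetical): a typical caller pairs the two helpers above in its setup
 * and teardown paths:
 *
 *	struct config_group *stats_group;
 *
 *	stats_group = configfs_register_default_group(parent_group, "stats",
 *						       &stats_type);
 *	if (IS_ERR(stats_group))
 *		return PTR_ERR(stats_group);
 *	...
 *	configfs_unregister_default_group(stats_group);
 */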

int configfs_register_subsystem(struct configfs_subsystem *subsys)
{
	int err;
	struct config_group *group = &subsys->su_group;
	struct dentry *dentry;
	struct dentry *root;
	struct configfs_dirent *sd;
	struct configfs_fragment *frag;

	frag = new_fragment();
	if (!frag)
		return -ENOMEM;

	root = configfs_pin_fs();
	if (IS_ERR(root)) {
		put_fragment(frag);
		return PTR_ERR(root);
	}

	if (!group->cg_item.ci_name)
		group->cg_item.ci_name = group->cg_item.ci_namebuf;

	sd = root->d_fsdata;
	mutex_lock(&configfs_subsystem_mutex);
	link_group(to_config_group(sd->s_element), group);
	mutex_unlock(&configfs_subsystem_mutex);

	inode_lock_nested(d_inode(root), I_MUTEX_PARENT);

	err = -ENOMEM;
	dentry = d_alloc_name(root, group->cg_item.ci_name);
	if (dentry) {
		d_add(dentry, NULL);

		err = configfs_attach_group(sd->s_element, &group->cg_item,
					    dentry, frag);
		if (err) {
			BUG_ON(d_inode(dentry));
			d_drop(dentry);
			dput(dentry);
		} else {
			spin_lock(&configfs_dirent_lock);
			configfs_dir_set_ready(dentry->d_fsdata);
			spin_unlock(&configfs_dirent_lock);
		}
	}

	inode_unlock(d_inode(root));

	if (err) {
		mutex_lock(&configfs_subsystem_mutex);
		unlink_group(group);
		mutex_unlock(&configfs_subsystem_mutex);
		configfs_release_fs();
	}
	put_fragment(frag);

	return err;
}

void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
{
	struct config_group *group = &subsys->su_group;
	struct dentry *dentry = group->cg_item.ci_dentry;
	struct dentry *root = dentry->d_sb->s_root;
	struct configfs_dirent *sd = dentry->d_fsdata;
	struct configfs_fragment *frag = sd->s_frag;

	if (dentry->d_parent != root) {
		pr_err("Tried to unregister non-subsystem!\n");
		return;
	}

	down_write(&frag->frag_sem);
	frag->frag_dead = true;
	up_write(&frag->frag_sem);

	inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
	inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
	mutex_lock(&configfs_symlink_mutex);
	spin_lock(&configfs_dirent_lock);
	if (configfs_detach_prep(dentry, NULL)) {
		pr_err("Tried to unregister non-empty subsystem!\n");
	}
	spin_unlock(&configfs_dirent_lock);
	mutex_unlock(&configfs_symlink_mutex);
	configfs_detach_group(&group->cg_item);
	d_inode(dentry)->i_flags |= S_DEAD;
	dont_mount(dentry);
	inode_unlock(d_inode(dentry));

	d_drop(dentry);
	fsnotify_rmdir(d_inode(root), dentry);

	inode_unlock(d_inode(root));

	dput(dentry);

	mutex_lock(&configfs_subsystem_mutex);
	unlink_group(group);
	mutex_unlock(&configfs_subsystem_mutex);
	configfs_release_fs();
}

EXPORT_SYMBOL(configfs_register_subsystem);
EXPORT_SYMBOL(configfs_unregister_subsystem);
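
/*
 * Illustrative sketch (not part of this file; the "example" names are
 * hypothetical): a client module typically registers its subsystem from its
 * init path like this, and undoes it with configfs_unregister_subsystem():
 *
 *	static struct configfs_subsystem example_subsys = {
 *		.su_group = {
 *			.cg_item = {
 *				.ci_namebuf = "example",
 *				.ci_type = &example_type,
 *			},
 *		},
 *	};
 *
 *	config_group_init(&example_subsys.su_group);
 *	mutex_init(&example_subsys.su_mutex);
 *	ret = configfs_register_subsystem(&example_subsys);
 */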