// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dir.c - Operations for configfs directories.
 *
 * Based on sysfs:
 *	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
 *
 * configfs Copyright (C) 2005 Oracle.  All rights reserved.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>

#include <linux/configfs.h>
#include "configfs_internal.h"

/*
 * Protects mutations of configfs_dirent linkage together with proper i_mutex
 * Also protects mutations of symlinks linkage to target configfs_dirent
 * Mutators of configfs_dirent linkage must *both* have the proper inode locked
 * and configfs_dirent_lock locked, in that order.
 * This allows one to safely traverse configfs_dirent trees and symlinks without
 * having to lock inodes.
 *
 * Protects setting of CONFIGFS_USET_DROPPING: checking the flag
 * unlocked is not reliable unless in detach_groups() called from
 * rmdir()/unregister() and from configfs_attach_group()
 */
DEFINE_SPINLOCK(configfs_dirent_lock);

/*
 * All of link_obj/unlink_obj/link_group/unlink_group require that
 * subsys->su_mutex is held.
 * But parent configfs_subsystem is NULL when config_item is root.
 * Use this mutex when config_item is root.
 */
static DEFINE_MUTEX(configfs_subsystem_mutex);

static void configfs_d_iput(struct dentry * dentry,
			    struct inode * inode)
{
	struct configfs_dirent *sd = dentry->d_fsdata;

	if (sd) {
		/* Coordinate with configfs_readdir */
		spin_lock(&configfs_dirent_lock);
		/*
		 * Set sd->s_dentry to NULL only when this dentry is the one
		 * that is going to be killed.  Otherwise configfs_d_iput may
		 * run just after configfs_lookup and set sd->s_dentry to
		 * NULL even though it's still in use.
		 */
		if (sd->s_dentry == dentry)
			sd->s_dentry = NULL;

		spin_unlock(&configfs_dirent_lock);
		configfs_put(sd);
	}
	iput(inode);
}

const struct dentry_operations configfs_dentry_ops = {
	.d_iput		= configfs_d_iput,
	.d_delete	= always_delete_dentry,
};

#ifdef CONFIG_LOCKDEP

/*
 * Helpers to make lockdep happy with our recursive locking of default groups'
 * inodes (see configfs_attach_group() and configfs_detach_group()).
 * We put default groups' i_mutexes in separate classes according to their depth
 * from the youngest non-default group ancestor.
 *
 * For a non-default group A having default groups A/B, A/C, and A/C/D, default
 * groups A/B and A/C will have their inode's mutex in class
 * default_group_class[0], and default group A/C/D will be in
 * default_group_class[1].
 *
 * The lock classes are declared and assigned in inode.c, according to the
 * s_depth value.
 * The s_depth value is initialized to -1, adjusted to >= 0 when attaching
 * default groups, and reset to -1 when all default groups are attached. During
 * attachment, if configfs_create() sees s_depth > 0, the lock class of the new
 * inode's mutex is set to default_group_class[s_depth - 1].
 */

static void configfs_init_dirent_depth(struct configfs_dirent *sd)
{
	sd->s_depth = -1;
}

static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
					   struct configfs_dirent *sd)
{
	int parent_depth = parent_sd->s_depth;

	if (parent_depth >= 0)
		sd->s_depth = parent_depth + 1;
}

static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
	/*
	 * item's i_mutex class is already setup, so s_depth is now only
	 * used to set new sub-directories s_depth, which is always done
	 * with item's i_mutex locked.
	 */
	/*
	 * sd->s_depth == -1 iff we are a non default group.
	 * else (we are a default group) sd->s_depth > 0 (see
	 * create_dir()).
	 */
	if (sd->s_depth == -1)
		/*
		 * We are a non default group and we are going to create
		 * default groups.
		 */
		sd->s_depth = 0;
}

static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
	/* We will not create default groups anymore. */
	sd->s_depth = -1;
}

#else /* CONFIG_LOCKDEP */

static void configfs_init_dirent_depth(struct configfs_dirent *sd)
{
}

static void configfs_set_dir_dirent_depth(struct configfs_dirent *parent_sd,
					   struct configfs_dirent *sd)
{
}

static void
configfs_adjust_dir_dirent_depth_before_populate(struct configfs_dirent *sd)
{
}

static void
configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
{
}

#endif /* CONFIG_LOCKDEP */

static struct configfs_fragment *new_fragment(void)
{
	struct configfs_fragment *p;

	p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
	if (p) {
		atomic_set(&p->frag_count, 1);
		init_rwsem(&p->frag_sem);
		p->frag_dead = false;
	}
	return p;
}

void put_fragment(struct configfs_fragment *frag)
{
	if (frag && atomic_dec_and_test(&frag->frag_count))
		kfree(frag);
}

struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
{
	if (likely(frag))
		atomic_inc(&frag->frag_count);
	return frag;
}

/*
 * Allocates a new configfs_dirent and links it to the parent configfs_dirent
 */
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
						   void *element, int type,
						   struct configfs_fragment *frag)
{
	struct configfs_dirent * sd;

	sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL);
	if (!sd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sd->s_count, 1);
	INIT_LIST_HEAD(&sd->s_children);
	sd->s_element = element;
	sd->s_type = type;
	configfs_init_dirent_depth(sd);
	spin_lock(&configfs_dirent_lock);
	if (parent_sd->s_type & CONFIGFS_USET_DROPPING) {
		spin_unlock(&configfs_dirent_lock);
		kmem_cache_free(configfs_dir_cachep, sd);
		return ERR_PTR(-ENOENT);
	}
	sd->s_frag = get_fragment(frag);
	list_add(&sd->s_sibling, &parent_sd->s_children);
	spin_unlock(&configfs_dirent_lock);

	return sd;
}

/*
 *
 * Return -EEXIST if there is already a configfs element with the same
 * name for the same parent.
 *
 * called with parent inode's i_mutex held
 */
static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
				  const unsigned char *new)
{
	struct configfs_dirent * sd;

	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
		if (sd->s_element) {
			const unsigned char *existing = configfs_get_name(sd);
			if (strcmp(existing, new))
				continue;
			else
				return -EEXIST;
		}
	}

	return 0;
}


int configfs_make_dirent(struct configfs_dirent * parent_sd,
			 struct dentry * dentry, void * element,
			 umode_t mode, int type, struct configfs_fragment *frag)
{
	struct configfs_dirent * sd;

	sd = configfs_new_dirent(parent_sd, element, type, frag);
	if (IS_ERR(sd))
		return PTR_ERR(sd);

	sd->s_mode = mode;
	sd->s_dentry = dentry;
	if (dentry)
		dentry->d_fsdata = configfs_get(sd);

	return 0;
}

static void configfs_remove_dirent(struct dentry *dentry)
{
	struct configfs_dirent *sd = dentry->d_fsdata;

	if (!sd)
		return;
	spin_lock(&configfs_dirent_lock);
	list_del_init(&sd->s_sibling);
	spin_unlock(&configfs_dirent_lock);
	configfs_put(sd);
}

/**
 * configfs_create_dir - create a directory for a config_item.
 * @item:	config_item we're creating a directory for.
 * @dentry:	config_item's dentry.
 * @frag:	config_item's fragment.
 *
 * Note: user-created entries won't be allowed under this new directory
 * until it is validated by configfs_dir_set_ready()
 */

static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
			       struct configfs_fragment *frag)
{
	int error;
	umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
	struct dentry *p = dentry->d_parent;
	struct inode *inode;

	BUG_ON(!item);

	error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
	if (unlikely(error))
		return error;

	error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
				     CONFIGFS_DIR | CONFIGFS_USET_CREATING,
				     frag);
	if (unlikely(error))
		return error;

	configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
	inode = configfs_create(dentry, mode);
	if (IS_ERR(inode))
		goto out_remove;

	inode->i_op = &configfs_dir_inode_operations;
	inode->i_fop = &configfs_dir_operations;
	/* directory inodes start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	d_instantiate(dentry, inode);
	/* already hashed */
	dget(dentry);  /* pin directory dentries in core */
	inc_nlink(d_inode(p));
	item->ci_dentry = dentry;
	return 0;

out_remove:
	configfs_put(dentry->d_fsdata);
	configfs_remove_dirent(dentry);
	return PTR_ERR(inode);
}

/*
 * Allow userspace to create new entries under a new directory created with
 * configfs_create_dir(), and under all of its children directories recursively.
 * @sd		configfs_dirent of the new directory to validate
 *
 * Caller must hold configfs_dirent_lock.
 */
static void configfs_dir_set_ready(struct configfs_dirent *sd)
{
	struct configfs_dirent *child_sd;

	sd->s_type &= ~CONFIGFS_USET_CREATING;
	list_for_each_entry(child_sd, &sd->s_children, s_sibling)
		if (child_sd->s_type & CONFIGFS_USET_CREATING)
			configfs_dir_set_ready(child_sd);
}

/*
 * Check that a directory does not belong to a directory hierarchy being
 * attached and not validated yet.
 * @sd		configfs_dirent of the directory to check
 *
 * @return	non-zero iff the directory was validated
 *
 * Note: takes configfs_dirent_lock, so the result may change from false to true
 * in two consecutive calls, but never from true to false.
 */
int configfs_dirent_is_ready(struct configfs_dirent *sd)
{
	int ret;

	spin_lock(&configfs_dirent_lock);
	ret = !(sd->s_type & CONFIGFS_USET_CREATING);
	spin_unlock(&configfs_dirent_lock);

	return ret;
}

int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
			 struct dentry *dentry, char *body)
{
	int err = 0;
	umode_t mode = S_IFLNK | S_IRWXUGO;
	struct configfs_dirent *p = parent->d_fsdata;
	struct inode *inode;

	err = configfs_make_dirent(p, dentry, target, mode, CONFIGFS_ITEM_LINK,
				   p->s_frag);
	if (err)
		return err;

	inode = configfs_create(dentry, mode);
	if (IS_ERR(inode))
		goto out_remove;

	inode->i_link = body;
	inode->i_op = &configfs_symlink_inode_operations;
	d_instantiate(dentry, inode);
	dget(dentry);  /* pin link dentries in core */
	return 0;

out_remove:
	configfs_put(dentry->d_fsdata);
	configfs_remove_dirent(dentry);
	return PTR_ERR(inode);
}

static void remove_dir(struct dentry * d)
{
	struct dentry * parent = dget(d->d_parent);

	configfs_remove_dirent(d);

	if (d_really_is_positive(d))
		simple_rmdir(d_inode(parent),d);

	pr_debug(" o %pd removing done (%d)\n", d, d_count(d));

	dput(parent);
}

/**
 * configfs_remove_dir - remove a config_item's directory.
 * @item:	config_item we're removing.
 *
 * The only thing special about this is that we remove any files in
 * the directory before we remove the directory, and we've inlined
 * what used to be configfs_rmdir() below, instead of calling separately.
 *
 * Caller holds the mutex of the item's inode
 */

static void configfs_remove_dir(struct config_item * item)
{
	struct dentry * dentry = dget(item->ci_dentry);

	if (!dentry)
		return;

	remove_dir(dentry);
	/**
	 * Drop reference from dget() on entrance.
	 */
	dput(dentry);
}

static struct dentry * configfs_lookup(struct inode *dir,
				       struct dentry *dentry,
				       unsigned int flags)
{
	struct configfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
	struct configfs_dirent * sd;
	struct inode *inode = NULL;

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	/*
	 * Fake invisibility if dir belongs to a group/default groups hierarchy
	 * being attached
	 *
	 * This forbids userspace from reading or writing attributes of items
	 * that may not have completed their initialization, since the dentries
	 * of the attributes won't be instantiated.
	 */
	if (!configfs_dirent_is_ready(parent_sd))
		return ERR_PTR(-ENOENT);

	spin_lock(&configfs_dirent_lock);
	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
		if ((sd->s_type & CONFIGFS_NOT_PINNED) &&
		    !strcmp(configfs_get_name(sd), dentry->d_name.name)) {
			struct configfs_attribute *attr = sd->s_element;
			umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;

			dentry->d_fsdata = configfs_get(sd);
			sd->s_dentry = dentry;
			spin_unlock(&configfs_dirent_lock);

			inode = configfs_create(dentry, mode);
			if (IS_ERR(inode)) {
				configfs_put(sd);
				return ERR_CAST(inode);
			}
			if (sd->s_type & CONFIGFS_ITEM_BIN_ATTR) {
				inode->i_size = 0;
				inode->i_fop = &configfs_bin_file_operations;
			} else {
				inode->i_size = PAGE_SIZE;
				inode->i_fop = &configfs_file_operations;
			}
			goto done;
		}
	}
	spin_unlock(&configfs_dirent_lock);
done:
	d_add(dentry, inode);
	return NULL;
}

/*
 * Only subdirectories count here.  Files (CONFIGFS_NOT_PINNED) are
 * attributes and are removed by rmdir().  We recurse, setting
 * CONFIGFS_USET_DROPPING on all children that are candidates for
 * default detach.
 * If there is an error, the caller will reset the flags via
 * configfs_detach_rollback().
 */
static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait)
{
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	struct configfs_dirent *sd;
	int ret;

	/* Mark that we're trying to drop the group */
	parent_sd->s_type |= CONFIGFS_USET_DROPPING;

	ret = -EBUSY;
	if (parent_sd->s_links)
		goto out;

	ret = 0;
	list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
		if (!sd->s_element ||
		    (sd->s_type & CONFIGFS_NOT_PINNED))
			continue;
		if (sd->s_type & CONFIGFS_USET_DEFAULT) {
			/* Abort if racing with mkdir() */
			if (sd->s_type & CONFIGFS_USET_IN_MKDIR) {
				if (wait)
					*wait = dget(sd->s_dentry);
				return -EAGAIN;
			}

			/*
			 * Yup, recursive.  If there's a problem, blame
			 * deep nesting of default_groups
			 */
			ret = configfs_detach_prep(sd->s_dentry, wait);
			if (!ret)
				continue;
		} else
			ret = -ENOTEMPTY;

		break;
	}

out:
	return ret;
}

/*
 * Walk the tree, resetting CONFIGFS_USET_DROPPING wherever it was
 * set.
 */
static void configfs_detach_rollback(struct dentry *dentry)
{
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	struct configfs_dirent *sd;

	parent_sd->s_type &= ~CONFIGFS_USET_DROPPING;

	list_for_each_entry(sd, &parent_sd->s_children, s_sibling)
		if (sd->s_type & CONFIGFS_USET_DEFAULT)
			configfs_detach_rollback(sd->s_dentry);
}

static void detach_attrs(struct config_item * item)
{
	struct dentry * dentry = dget(item->ci_dentry);
	struct configfs_dirent * parent_sd;
	struct configfs_dirent * sd, * tmp;

	if (!dentry)
		return;

	pr_debug("configfs %s: dropping attrs for dir\n",
		 dentry->d_name.name);

	parent_sd = dentry->d_fsdata;
	list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
		if (!sd->s_element || !(sd->s_type & CONFIGFS_NOT_PINNED))
			continue;
		spin_lock(&configfs_dirent_lock);
		list_del_init(&sd->s_sibling);
		spin_unlock(&configfs_dirent_lock);
		configfs_drop_dentry(sd, dentry);
		configfs_put(sd);
	}

	/**
	 * Drop reference from dget() on entrance.
	 */
	dput(dentry);
}

static int populate_attrs(struct config_item *item)
{
	const struct config_item_type *t = item->ci_type;
	struct configfs_attribute *attr;
	struct configfs_bin_attribute *bin_attr;
	int error = 0;
	int i;

	if (!t)
		return -EINVAL;
	if (t->ct_attrs) {
		for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
			if ((error = configfs_create_file(item, attr)))
				break;
		}
	}
	if (t->ct_bin_attrs) {
		for (i = 0; (bin_attr = t->ct_bin_attrs[i]) != NULL; i++) {
			error = configfs_create_bin_file(item, bin_attr);
			if (error)
				break;
		}
	}

	if (error)
		detach_attrs(item);

	return error;
}
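
/*
 * Hypothetical client-side sketch (not part of this file's logic).
 * populate_attrs() above walks the NULL-terminated ct_attrs array of the
 * item's type, so a client module typically declares its attributes roughly
 * as below; the "widget" names are invented purely for illustration.
 *
 *	struct widget {
 *		struct config_item item;
 *		int speed;
 *	};
 *
 *	static ssize_t widget_speed_show(struct config_item *item, char *page)
 *	{
 *		struct widget *w = container_of(item, struct widget, item);
 *
 *		return snprintf(page, PAGE_SIZE, "%d\n", w->speed);
 *	}
 *
 *	static ssize_t widget_speed_store(struct config_item *item,
 *					  const char *page, size_t count)
 *	{
 *		struct widget *w = container_of(item, struct widget, item);
 *		int ret = kstrtoint(page, 0, &w->speed);
 *
 *		return ret ? ret : count;
 *	}
 *
 *	CONFIGFS_ATTR(widget_, speed);
 *
 *	static struct configfs_attribute *widget_attrs[] = {
 *		&widget_attr_speed,
 *		NULL,
 *	};
 *
 * widget_attrs would then be assigned to ct_attrs in the item's
 * config_item_type; populate_attrs() creates one file per entry when the
 * item's directory is attached.
 */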

static int configfs_attach_group(struct config_item *parent_item,
				 struct config_item *item,
				 struct dentry *dentry,
				 struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);

static void detach_groups(struct config_group *group)
{
	struct dentry * dentry = dget(group->cg_item.ci_dentry);
	struct dentry *child;
	struct configfs_dirent *parent_sd;
	struct configfs_dirent *sd, *tmp;

	if (!dentry)
		return;

	parent_sd = dentry->d_fsdata;
	list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
		if (!sd->s_element ||
		    !(sd->s_type & CONFIGFS_USET_DEFAULT))
			continue;

		child = sd->s_dentry;

		inode_lock(d_inode(child));

		configfs_detach_group(sd->s_element);
		d_inode(child)->i_flags |= S_DEAD;
		dont_mount(child);

		inode_unlock(d_inode(child));

		d_delete(child);
		dput(child);
	}

	/**
	 * Drop reference from dget() on entrance.
	 */
	dput(dentry);
}

/*
 * This fakes mkdir(2) on a default_groups[] entry.  It
 * creates a dentry, attaches it, and then does fixup
 * on the sd->s_type.
 *
 * We could, perhaps, tweak our parent's ->mkdir for a minute and
 * try using vfs_mkdir.  Just a thought.
 */
static int create_default_group(struct config_group *parent_group,
				struct config_group *group,
				struct configfs_fragment *frag)
{
	int ret;
	struct configfs_dirent *sd;
	/* We trust the caller holds a reference to parent */
	struct dentry *child, *parent = parent_group->cg_item.ci_dentry;

	if (!group->cg_item.ci_name)
		group->cg_item.ci_name = group->cg_item.ci_namebuf;

	ret = -ENOMEM;
	child = d_alloc_name(parent, group->cg_item.ci_name);
	if (child) {
		d_add(child, NULL);

		ret = configfs_attach_group(&parent_group->cg_item,
					    &group->cg_item, child, frag);
		if (!ret) {
			sd = child->d_fsdata;
			sd->s_type |= CONFIGFS_USET_DEFAULT;
		} else {
			BUG_ON(d_inode(child));
			d_drop(child);
			dput(child);
		}
	}

	return ret;
}

static int populate_groups(struct config_group *group,
			   struct configfs_fragment *frag)
{
	struct config_group *new_group;
	int ret = 0;

	list_for_each_entry(new_group, &group->default_groups, group_entry) {
		ret = create_default_group(group, new_group, frag);
		if (ret) {
			detach_groups(group);
			break;
		}
	}

	return ret;
}

void configfs_remove_default_groups(struct config_group *group)
{
	struct config_group *g, *n;

	list_for_each_entry_safe(g, n, &group->default_groups, group_entry) {
		list_del(&g->group_entry);
		config_item_put(&g->cg_item);
	}
}
EXPORT_SYMBOL(configfs_remove_default_groups);
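
/*
 * Hypothetical sketch of the client side of default groups (illustration
 * only; the "mydev"/"ports" names are invented).  A client typically
 * populates group->default_groups before the parent is attached, e.g. from
 * its make_group() callback, and later tears the list down with
 * configfs_remove_default_groups():
 *
 *	static struct config_group *mydev_make_group(struct config_group *parent,
 *						     const char *name)
 *	{
 *		struct mydev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return ERR_PTR(-ENOMEM);
 *
 *		config_group_init_type_name(&dev->group, name, &mydev_type);
 *		config_group_init_type_name(&dev->ports_group, "ports",
 *					    &mydev_ports_type);
 *		configfs_add_default_group(&dev->ports_group, &dev->group);
 *
 *		return &dev->group;
 *	}
 *
 * populate_groups() above then fakes mkdir(2) for "ports" when the new
 * directory is attached, and detach_groups() undoes it on rmdir().
 */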

/*
 * All of link_obj/unlink_obj/link_group/unlink_group require that
 * subsys->su_mutex is held.
 */

static void unlink_obj(struct config_item *item)
{
	struct config_group *group;

	group = item->ci_group;
	if (group) {
		list_del_init(&item->ci_entry);

		item->ci_group = NULL;
		item->ci_parent = NULL;

		/* Drop the reference for ci_entry */
		config_item_put(item);

		/* Drop the reference for ci_parent */
		config_group_put(group);
	}
}

static void link_obj(struct config_item *parent_item, struct config_item *item)
{
	/*
	 * Parent seems redundant with group, but it makes certain
	 * traversals much nicer.
	 */
	item->ci_parent = parent_item;

	/*
	 * We hold a reference on the parent for the child's ci_parent
	 * link.
	 */
	item->ci_group = config_group_get(to_config_group(parent_item));
	list_add_tail(&item->ci_entry, &item->ci_group->cg_children);

	/*
	 * We hold a reference on the child for ci_entry on the parent's
	 * cg_children
	 */
	config_item_get(item);
}

static void unlink_group(struct config_group *group)
{
	struct config_group *new_group;

	list_for_each_entry(new_group, &group->default_groups, group_entry)
		unlink_group(new_group);

	group->cg_subsys = NULL;
	unlink_obj(&group->cg_item);
}

static void link_group(struct config_group *parent_group, struct config_group *group)
{
	struct config_group *new_group;
	struct configfs_subsystem *subsys = NULL; /* gcc is a turd */

	link_obj(&parent_group->cg_item, &group->cg_item);

	if (parent_group->cg_subsys)
		subsys = parent_group->cg_subsys;
	else if (configfs_is_root(&parent_group->cg_item))
		subsys = to_configfs_subsystem(group);
	else
		BUG();
	group->cg_subsys = subsys;

	list_for_each_entry(new_group, &group->default_groups, group_entry)
		link_group(group, new_group);
}

/*
 * The goal is that configfs_attach_item() (and
 * configfs_attach_group()) can be called from either the VFS or this
 * module.  That is, they assume that the items have been created,
 * the dentry allocated, and the dcache is all ready to go.
 *
 * If they fail, they must clean up after themselves as if they
 * had never been called.  The caller (VFS or local function) will
 * handle cleaning up the dcache bits.
 *
 * configfs_detach_group() and configfs_detach_item() behave similarly on
 * the way out.  They assume that the proper semaphores are held, they
 * clean up the configfs items, and they expect their callers will
 * handle the dcache bits.
 */
static int configfs_attach_item(struct config_item *parent_item,
				struct config_item *item,
				struct dentry *dentry,
				struct configfs_fragment *frag)
{
	int ret;

	ret = configfs_create_dir(item, dentry, frag);
	if (!ret) {
		ret = populate_attrs(item);
		if (ret) {
			/*
			 * We are going to remove an inode and its dentry but
			 * the VFS may already have hit and used them.  Thus,
			 * we must lock them as rmdir() would.
			 */
			inode_lock(d_inode(dentry));
			configfs_remove_dir(item);
			d_inode(dentry)->i_flags |= S_DEAD;
			dont_mount(dentry);
			inode_unlock(d_inode(dentry));
			d_delete(dentry);
		}
	}

	return ret;
}

/* Caller holds the mutex of the item's inode */
static void configfs_detach_item(struct config_item *item)
{
	detach_attrs(item);
	configfs_remove_dir(item);
}

static int configfs_attach_group(struct config_item *parent_item,
				 struct config_item *item,
				 struct dentry *dentry,
				 struct configfs_fragment *frag)
{
	int ret;
	struct configfs_dirent *sd;

	ret = configfs_attach_item(parent_item, item, dentry, frag);
	if (!ret) {
		sd = dentry->d_fsdata;
		sd->s_type |= CONFIGFS_USET_DIR;

		/*
		 * FYI, we're faking mkdir in populate_groups()
		 * We must lock the group's inode to avoid races with the VFS
		 * which can already hit the inode and try to add/remove entries
		 * under it.
		 *
		 * We must also lock the inode to remove it safely in case of
		 * error, as rmdir() would.
		 */
		inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
		configfs_adjust_dir_dirent_depth_before_populate(sd);
		ret = populate_groups(to_config_group(item), frag);
		if (ret) {
			configfs_detach_item(item);
			d_inode(dentry)->i_flags |= S_DEAD;
			dont_mount(dentry);
		}
		configfs_adjust_dir_dirent_depth_after_populate(sd);
		inode_unlock(d_inode(dentry));
		if (ret)
			d_delete(dentry);
	}

	return ret;
}

/* Caller holds the mutex of the group's inode */
static void configfs_detach_group(struct config_item *item)
{
	detach_groups(to_config_group(item));
	configfs_detach_item(item);
}

/*
 * After the item has been detached from the filesystem view, we are
 * ready to tear it out of the hierarchy.  Notify the client before
 * we do that so they can perform any cleanup that requires
 * navigating the hierarchy.  A client does not need to provide this
 * callback.  The subsystem semaphore MUST be held by the caller, and
 * references must be valid for both items.  It also assumes the
 * caller has validated ci_type.
 */
static void client_disconnect_notify(struct config_item *parent_item,
				     struct config_item *item)
{
	const struct config_item_type *type;

	type = parent_item->ci_type;
	BUG_ON(!type);

	if (type->ct_group_ops && type->ct_group_ops->disconnect_notify)
		type->ct_group_ops->disconnect_notify(to_config_group(parent_item),
						      item);
}

/*
 * Drop the initial reference from make_item()/make_group()
 * This function assumes that reference is held on item
 * and that item holds a valid reference to the parent.  Also, it
 * assumes the caller has validated ci_type.
 */
static void client_drop_item(struct config_item *parent_item,
			     struct config_item *item)
{
	const struct config_item_type *type;

	type = parent_item->ci_type;
	BUG_ON(!type);

	/*
	 * If ->drop_item() exists, it is responsible for the
	 * config_item_put().
	 */
	if (type->ct_group_ops && type->ct_group_ops->drop_item)
		type->ct_group_ops->drop_item(to_config_group(parent_item),
					      item);
	else
		config_item_put(item);
}
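
/*
 * Hypothetical sketch of the group operations that the mkdir()/rmdir() paths
 * below end up calling (illustration only; the "widget" names are invented
 * and widget_type is an assumed config_item_type).  make_item() hands
 * configfs a new item carrying one reference; client_drop_item() above drops
 * that reference via ->drop_item() if it exists, otherwise via
 * config_item_put(), and the item's ->release() finally frees it:
 *
 *	static void widget_release(struct config_item *item)
 *	{
 *		kfree(container_of(item, struct widget, item));
 *	}
 *
 *	static struct configfs_item_operations widget_item_ops = {
 *		.release	= widget_release,
 *	};
 *
 *	static struct config_item *widget_make_item(struct config_group *group,
 *						    const char *name)
 *	{
 *		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);
 *
 *		if (!w)
 *			return ERR_PTR(-ENOMEM);
 *		config_item_init_type_name(&w->item, name, &widget_type);
 *		return &w->item;
 *	}
 *
 *	static void widget_drop_item(struct config_group *group,
 *				     struct config_item *item)
 *	{
 *		config_item_put(item);
 *	}
 *
 *	static struct configfs_group_operations widgets_group_ops = {
 *		.make_item	= widget_make_item,
 *		.drop_item	= widget_drop_item,
 *	};
 *
 * widget_type would reference widget_item_ops (and the widget_attrs array
 * sketched earlier) through ct_item_ops and ct_attrs.
 */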

#ifdef DEBUG
static void configfs_dump_one(struct configfs_dirent *sd, int level)
{
	pr_info("%*s\"%s\":\n", level, " ", configfs_get_name(sd));

#define type_print(_type) if (sd->s_type & _type) pr_info("%*s %s\n", level, " ", #_type);
	type_print(CONFIGFS_ROOT);
	type_print(CONFIGFS_DIR);
	type_print(CONFIGFS_ITEM_ATTR);
	type_print(CONFIGFS_ITEM_LINK);
	type_print(CONFIGFS_USET_DIR);
	type_print(CONFIGFS_USET_DEFAULT);
	type_print(CONFIGFS_USET_DROPPING);
#undef type_print
}

static int configfs_dump(struct configfs_dirent *sd, int level)
{
	struct configfs_dirent *child_sd;
	int ret = 0;

	configfs_dump_one(sd, level);

	if (!(sd->s_type & (CONFIGFS_DIR|CONFIGFS_ROOT)))
		return 0;

	list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
		ret = configfs_dump(child_sd, level + 2);
		if (ret)
			break;
	}

	return ret;
}
#endif


/*
 * configfs_depend_item() and configfs_undepend_item()
 *
 * WARNING: Do not call these from a configfs callback!
 *
 * This describes these functions and their helpers.
 *
 * Allow another kernel system to depend on a config_item.  If this
 * happens, the item cannot go away until the dependent can live without
 * it.  The idea is to give client modules as simple an interface as
 * possible.  When a system asks them to depend on an item, they just
 * call configfs_depend_item().  If the item is live and the client
 * driver is in good shape, we'll happily do the work for them.
 *
 * Why is the locking complex?  Because configfs uses the VFS to handle
 * all locking, but this function is called outside the normal
 * VFS->configfs path.  So it must take VFS locks to prevent the
 * VFS->configfs stuff (configfs_mkdir(), configfs_rmdir(), etc).  This is
 * why you can't call these functions underneath configfs callbacks.
 *
 * Note, btw, that this can be called at *any* time, even when a configfs
 * subsystem isn't registered, or when configfs is loading or unloading.
 * Just like configfs_register_subsystem().  So we take the same
 * precautions.  We pin the filesystem.  We lock configfs_dirent_lock.
 * If we can find the target item in the
 * configfs tree, it must be part of the subsystem tree as well, so we
 * do not need the subsystem semaphore.  Holding configfs_dirent_lock helps
 * locking out mkdir() and rmdir(), who might be racing us.
 */

/*
 * configfs_depend_prep()
 *
 * Only subdirectories count here.  Files (CONFIGFS_NOT_PINNED) are
 * attributes.  This is similar to, but not the same as,
 * configfs_detach_prep().  Note that configfs_detach_prep() expects the
 * parent to be locked when it is called, but we lock the parent *inside*
 * configfs_depend_prep().  We do that so we can unlock it if we find
 * nothing.
 *
 * Here we do a depth-first search of the dentry hierarchy looking for
 * our object.
 * We deliberately ignore items tagged as dropping since they are virtually
 * dead, as well as items in the middle of attachment since they virtually
 * do not exist yet.  This completes the locking out of racing mkdir() and
 * rmdir().
 * Note: subdirectories in the middle of attachment start with s_type =
 * CONFIGFS_DIR|CONFIGFS_USET_CREATING set by create_dir().  When
 * CONFIGFS_USET_CREATING is set, we ignore the item.  The actual setting of
 * s_type is in configfs_new_dirent(), which holds configfs_dirent_lock.
 *
 * If the target is not found, -ENOENT is bubbled up.
 *
 * This adds a requirement that all config_items be unique!
 *
 * This is recursive.  There isn't
 * much on the stack, though, so folks that need this function - be careful
 * about your stack!  Patches will be accepted to make it iterative.
 */
static int configfs_depend_prep(struct dentry *origin,
				struct config_item *target)
{
	struct configfs_dirent *child_sd, *sd;
	int ret = 0;

	BUG_ON(!origin || !origin->d_fsdata);
	sd = origin->d_fsdata;

	if (sd->s_element == target)  /* Boo-yah */
		goto out;

	list_for_each_entry(child_sd, &sd->s_children, s_sibling) {
		if ((child_sd->s_type & CONFIGFS_DIR) &&
		    !(child_sd->s_type & CONFIGFS_USET_DROPPING) &&
		    !(child_sd->s_type & CONFIGFS_USET_CREATING)) {
			ret = configfs_depend_prep(child_sd->s_dentry,
						   target);
			if (!ret)
				goto out;  /* Child path boo-yah */
		}
	}

	/* We looped all our children and didn't find target */
	ret = -ENOENT;

out:
	return ret;
}

static int configfs_do_depend_item(struct dentry *subsys_dentry,
				   struct config_item *target)
{
	struct configfs_dirent *p;
	int ret;

	spin_lock(&configfs_dirent_lock);
	/* Scan the tree, return 0 if found */
	ret = configfs_depend_prep(subsys_dentry, target);
	if (ret)
		goto out_unlock_dirent_lock;

	/*
	 * We are sure that the item is not about to be removed by rmdir(), and
	 * not in the middle of attachment by mkdir().
	 */
	p = target->ci_dentry->d_fsdata;
	p->s_dependent_count += 1;

out_unlock_dirent_lock:
	spin_unlock(&configfs_dirent_lock);

	return ret;
}

static inline struct configfs_dirent *
configfs_find_subsys_dentry(struct configfs_dirent *root_sd,
			    struct config_item *subsys_item)
{
	struct configfs_dirent *p;
	struct configfs_dirent *ret = NULL;

	list_for_each_entry(p, &root_sd->s_children, s_sibling) {
		if (p->s_type & CONFIGFS_DIR &&
		    p->s_element == subsys_item) {
			ret = p;
			break;
		}
	}

	return ret;
}


int configfs_depend_item(struct configfs_subsystem *subsys,
			 struct config_item *target)
{
	int ret;
	struct configfs_dirent *subsys_sd;
	struct config_item *s_item = &subsys->su_group.cg_item;
	struct dentry *root;

	/*
	 * Pin the configfs filesystem.  This means we can safely access
	 * the root of the configfs filesystem.
	 */
	root = configfs_pin_fs();
	if (IS_ERR(root))
		return PTR_ERR(root);

	/*
	 * Next, lock the root directory.  We're going to check that the
	 * subsystem is really registered, and so we need to lock out
	 * configfs_[un]register_subsystem().
	 */
	inode_lock(d_inode(root));

	subsys_sd = configfs_find_subsys_dentry(root->d_fsdata, s_item);
	if (!subsys_sd) {
		ret = -ENOENT;
		goto out_unlock_fs;
	}

	/* Ok, now we can trust subsys/s_item */
	ret = configfs_do_depend_item(subsys_sd->s_dentry, target);

out_unlock_fs:
	inode_unlock(d_inode(root));

	/*
	 * If we succeeded, the fs is pinned via other methods.  If not,
	 * we're done with it anyway.  So release_fs() is always right.
	 */
	configfs_release_fs();

	return ret;
}
EXPORT_SYMBOL(configfs_depend_item);

/*
 * Release the dependent linkage.  This is much simpler than
 * configfs_depend_item() because we know that the client driver is
 * pinned, thus the subsystem is pinned, and therefore configfs is pinned.
 */
void configfs_undepend_item(struct config_item *target)
{
	struct configfs_dirent *sd;

	/*
	 * Since we can trust everything is pinned, we just need
	 * configfs_dirent_lock.
	 */
	spin_lock(&configfs_dirent_lock);

	sd = target->ci_dentry->d_fsdata;
	BUG_ON(sd->s_dependent_count < 1);

	sd->s_dependent_count -= 1;

	/*
	 * After this unlock, we cannot trust the item to stay alive!
	 * DO NOT REFERENCE item after this unlock.
	 */
	spin_unlock(&configfs_dirent_lock);
}
EXPORT_SYMBOL(configfs_undepend_item);
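
/*
 * Hypothetical caller-side sketch (illustration only; not taken from a real
 * driver, and do_hardware_setup() is an invented helper).  A kernel
 * subsystem that starts using a config_item pins it so rmdir(2) returns
 * -EBUSY until the dependency is dropped again.  As the warning above says,
 * this must not be called from within a configfs callback:
 *
 *	static int widget_enable(struct configfs_subsystem *subsys,
 *				 struct config_item *item)
 *	{
 *		int ret;
 *
 *		ret = configfs_depend_item(subsys, item);
 *		if (ret)
 *			return ret;
 *
 *		ret = do_hardware_setup(item);
 *		if (ret)
 *			configfs_undepend_item(item);
 *		return ret;
 *	}
 *
 * Once the user of the item is done with it, configfs_undepend_item(item)
 * releases the dependency and rmdir(2) can succeed again.
 */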

/*
 * caller_subsys is the caller's subsystem, not the target's.  It is used to
 * determine whether we should lock the root and check the subsystem or not.
 * When we are in the same subsystem as our target there is no need to do
 * locking, as we know that the subsystem is valid and cannot be unregistered
 * during this function: we are called from a callback of one of its children
 * and the VFS holds a lock on some inode.  Otherwise we have to lock our root
 * to ensure that the target's subsystem is not unregistered during this
 * function.
 */
int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
				  struct config_item *target)
{
	struct configfs_subsystem *target_subsys;
	struct config_group *root, *parent;
	struct configfs_dirent *subsys_sd;
	int ret = -ENOENT;

	/* Disallow this function for configfs root */
	if (configfs_is_root(target))
		return -EINVAL;

	parent = target->ci_group;
	/*
	 * This may happen when someone is trying to depend root
	 * directory of some subsystem
	 */
	if (configfs_is_root(&parent->cg_item)) {
		target_subsys = to_configfs_subsystem(to_config_group(target));
		root = parent;
	} else {
		target_subsys = parent->cg_subsys;
		/* Find a configfs root as we may need it for locking */
		for (root = parent; !configfs_is_root(&root->cg_item);
		     root = root->cg_item.ci_group)
			;
	}

	if (target_subsys != caller_subsys) {
		/*
		 * We are in other configfs subsystem, so we have to do
		 * additional locking to prevent other subsystem from being
		 * unregistered
		 */
		inode_lock(d_inode(root->cg_item.ci_dentry));

		/*
		 * As we are trying to depend item from other subsystem
		 * we have to check if this subsystem is still registered
		 */
		subsys_sd = configfs_find_subsys_dentry(
				root->cg_item.ci_dentry->d_fsdata,
				&target_subsys->su_group.cg_item);
		if (!subsys_sd)
			goto out_root_unlock;
	} else {
		subsys_sd = target_subsys->su_group.cg_item.ci_dentry->d_fsdata;
	}

	/* Now we can execute core of depend item */
	ret = configfs_do_depend_item(subsys_sd->s_dentry, target);

	if (target_subsys != caller_subsys)
out_root_unlock:
		/*
		 * We were called from subsystem other than our target so we
		 * took some locks so now it's time to release them
		 */
		inode_unlock(d_inode(root->cg_item.ci_dentry));

	return ret;
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);

static int configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
			  struct dentry *dentry, umode_t mode)
{
	int ret = 0;
	int module_got = 0;
	struct config_group *group = NULL;
	struct config_item *item = NULL;
	struct config_item *parent_item;
	struct configfs_subsystem *subsys;
	struct configfs_dirent *sd;
	const struct config_item_type *type;
	struct module *subsys_owner = NULL, *new_item_owner = NULL;
	struct configfs_fragment *frag;
	char *name;

	sd = dentry->d_parent->d_fsdata;

	/*
	 * Fake invisibility if dir belongs to a group/default groups hierarchy
	 * being attached
	 */
	if (!configfs_dirent_is_ready(sd)) {
		ret = -ENOENT;
		goto out;
	}

	if (!(sd->s_type & CONFIGFS_USET_DIR)) {
		ret = -EPERM;
		goto out;
	}

	frag = new_fragment();
	if (!frag) {
		ret = -ENOMEM;
		goto out;
	}

	/* Get a working ref for the duration of this function */
	parent_item = configfs_get_config_item(dentry->d_parent);
	type = parent_item->ci_type;
	subsys = to_config_group(parent_item)->cg_subsys;
	BUG_ON(!subsys);

	if (!type || !type->ct_group_ops ||
	    (!type->ct_group_ops->make_group &&
	     !type->ct_group_ops->make_item)) {
		ret = -EPERM;  /* Lack-of-mkdir returns -EPERM */
		goto out_put;
	}

	/*
	 * The subsystem may belong to a different module than the item
	 * being created.  We don't want to end up pinning the new item
	 * while failing to pin the subsystem it sits under.
	 */
	if (!subsys->su_group.cg_item.ci_type) {
		ret = -EINVAL;
		goto out_put;
	}
	subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;
	if (!try_module_get(subsys_owner)) {
		ret = -EINVAL;
		goto out_put;
	}

	name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto out_subsys_put;
	}

	snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);

	mutex_lock(&subsys->su_mutex);
	if (type->ct_group_ops->make_group) {
		group = type->ct_group_ops->make_group(to_config_group(parent_item), name);
		if (!group)
			group = ERR_PTR(-ENOMEM);
		if (!IS_ERR(group)) {
			link_group(to_config_group(parent_item), group);
			item = &group->cg_item;
		} else
			ret = PTR_ERR(group);
	} else {
		item = type->ct_group_ops->make_item(to_config_group(parent_item), name);
		if (!item)
			item = ERR_PTR(-ENOMEM);
		if (!IS_ERR(item))
			link_obj(parent_item, item);
		else
			ret = PTR_ERR(item);
	}
	mutex_unlock(&subsys->su_mutex);

	kfree(name);
	if (ret) {
		/*
		 * If ret != 0, then link_obj() was never called.
		 * There are no extra references to clean up.
		 */
		goto out_subsys_put;
	}

	/*
	 * link_obj() has been called (via link_group() for groups).
	 * From here on out, errors must clean that up.
	 */

	type = item->ci_type;
	if (!type) {
		ret = -EINVAL;
		goto out_unlink;
	}

	new_item_owner = type->ct_owner;
	if (!try_module_get(new_item_owner)) {
		ret = -EINVAL;
		goto out_unlink;
	}

	/*
	 * I hate doing it this way, but if there is
	 * an error, module_put() probably should
	 * happen after any cleanup.
	 */
	module_got = 1;

	/*
	 * Make racing rmdir() fail if it did not tag parent with
	 * CONFIGFS_USET_DROPPING
	 * Note: if CONFIGFS_USET_DROPPING is already set, attach_group() will
	 * fail and let rmdir() terminate correctly
	 */
	spin_lock(&configfs_dirent_lock);
	/* This will make configfs_detach_prep() fail */
	sd->s_type |= CONFIGFS_USET_IN_MKDIR;
	spin_unlock(&configfs_dirent_lock);

	if (group)
		ret = configfs_attach_group(parent_item, item, dentry, frag);
	else
		ret = configfs_attach_item(parent_item, item, dentry, frag);

	spin_lock(&configfs_dirent_lock);
	sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
	if (!ret)
		configfs_dir_set_ready(dentry->d_fsdata);
	spin_unlock(&configfs_dirent_lock);

out_unlink:
	if (ret) {
		/* Tear down everything we built up */
		mutex_lock(&subsys->su_mutex);

		client_disconnect_notify(parent_item, item);
		if (group)
			unlink_group(group);
		else
			unlink_obj(item);
		client_drop_item(parent_item, item);

		mutex_unlock(&subsys->su_mutex);

		if (module_got)
			module_put(new_item_owner);
	}

out_subsys_put:
	if (ret)
		module_put(subsys_owner);

out_put:
	/*
	 * link_obj()/link_group() took a reference from child->parent,
	 * so the parent is safely pinned.  We can drop our working
	 * reference.
	 */
	config_item_put(parent_item);
	put_fragment(frag);

out:
	return ret;
}

static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct config_item *parent_item;
	struct config_item *item;
	struct configfs_subsystem *subsys;
	struct configfs_dirent *sd;
	struct configfs_fragment *frag;
	struct module *subsys_owner = NULL, *dead_item_owner = NULL;
	int ret;

	sd = dentry->d_fsdata;
	if (sd->s_type & CONFIGFS_USET_DEFAULT)
		return -EPERM;

	/* Get a working ref until we have the child */
	parent_item = configfs_get_config_item(dentry->d_parent);
	subsys = to_config_group(parent_item)->cg_subsys;
	BUG_ON(!subsys);

	if (!parent_item->ci_type) {
		config_item_put(parent_item);
		return -EINVAL;
	}

	/* configfs_mkdir() shouldn't have allowed this */
	BUG_ON(!subsys->su_group.cg_item.ci_type);
	subsys_owner = subsys->su_group.cg_item.ci_type->ct_owner;

	/*
	 * Ensure that no racing symlink() will make detach_prep() fail while
	 * the new link is temporarily attached
	 */
	do {
		struct dentry *wait;

		mutex_lock(&configfs_symlink_mutex);
		spin_lock(&configfs_dirent_lock);
		/*
		 * Here's where we check for dependents.  We're protected by
		 * configfs_dirent_lock.
		 * If no dependent, atomically tag the item as dropping.
		 */
		ret = sd->s_dependent_count ? -EBUSY : 0;
		if (!ret) {
			ret = configfs_detach_prep(dentry, &wait);
			if (ret)
				configfs_detach_rollback(dentry);
		}
		spin_unlock(&configfs_dirent_lock);
		mutex_unlock(&configfs_symlink_mutex);

		if (ret) {
			if (ret != -EAGAIN) {
				config_item_put(parent_item);
				return ret;
			}

			/* Wait until the racing operation terminates */
			inode_lock(d_inode(wait));
			inode_unlock(d_inode(wait));
			dput(wait);
		}
	} while (ret == -EAGAIN);

	frag = sd->s_frag;
	if (down_write_killable(&frag->frag_sem)) {
		spin_lock(&configfs_dirent_lock);
		configfs_detach_rollback(dentry);
		spin_unlock(&configfs_dirent_lock);
		config_item_put(parent_item);
		return -EINTR;
	}
	frag->frag_dead = true;
	up_write(&frag->frag_sem);

	/* Get a working ref for the duration of this function */
	item = configfs_get_config_item(dentry);

	/* Drop reference from above, item already holds one. */
	config_item_put(parent_item);

	if (item->ci_type)
		dead_item_owner = item->ci_type->ct_owner;

	if (sd->s_type & CONFIGFS_USET_DIR) {
		configfs_detach_group(item);

		mutex_lock(&subsys->su_mutex);
		client_disconnect_notify(parent_item, item);
		unlink_group(to_config_group(item));
	} else {
		configfs_detach_item(item);

		mutex_lock(&subsys->su_mutex);
		client_disconnect_notify(parent_item, item);
		unlink_obj(item);
	}

	client_drop_item(parent_item, item);
	mutex_unlock(&subsys->su_mutex);

	/* Drop our reference from above */
	config_item_put(item);

	module_put(dead_item_owner);
	module_put(subsys_owner);

	return 0;
}

const struct inode_operations configfs_dir_inode_operations = {
	.mkdir		= configfs_mkdir,
	.rmdir		= configfs_rmdir,
	.symlink	= configfs_symlink,
	.unlink		= configfs_unlink,
	.lookup		= configfs_lookup,
	.setattr	= configfs_setattr,
};

const struct inode_operations configfs_root_inode_operations = {
	.lookup		= configfs_lookup,
	.setattr	= configfs_setattr,
};

static int configfs_dir_open(struct inode *inode, struct file *file)
{
	struct dentry * dentry = file->f_path.dentry;
	struct configfs_dirent * parent_sd = dentry->d_fsdata;
	int err;

	inode_lock(d_inode(dentry));
	/*
	 * Fake invisibility if dir belongs to a group/default groups hierarchy
	 * being attached
	 */
	err = -ENOENT;
	if (configfs_dirent_is_ready(parent_sd)) {
		file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
		if (IS_ERR(file->private_data))
			err = PTR_ERR(file->private_data);
		else
			err = 0;
	}
	inode_unlock(d_inode(dentry));

	return err;
}

static int configfs_dir_close(struct inode *inode, struct file *file)
{
	struct dentry * dentry = file->f_path.dentry;
	struct configfs_dirent * cursor = file->private_data;

	inode_lock(d_inode(dentry));
	spin_lock(&configfs_dirent_lock);
	list_del_init(&cursor->s_sibling);
	spin_unlock(&configfs_dirent_lock);
	inode_unlock(d_inode(dentry));

	release_configfs_dirent(cursor);

	return 0;
}

static int configfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct super_block *sb = dentry->d_sb;
	struct configfs_dirent *parent_sd = dentry->d_fsdata;
	struct configfs_dirent *cursor = file->private_data;
	struct list_head *p, *q = &cursor->s_sibling;
	ino_t ino = 0;

	if (!dir_emit_dots(file, ctx))
		return 0;
	spin_lock(&configfs_dirent_lock);
	if (ctx->pos == 2)
		list_move(q, &parent_sd->s_children);
	for (p = q->next; p != &parent_sd->s_children; p = p->next) {
		struct configfs_dirent *next;
		const char *name;
		int len;
		struct inode *inode = NULL;

		next = list_entry(p, struct configfs_dirent, s_sibling);
		if (!next->s_element)
			continue;

		/*
		 * We'll have a dentry and an inode for
		 * PINNED items and for open attribute
		 * files.  We lock here to prevent a race
		 * with configfs_d_iput() clearing
		 * s_dentry before calling iput().
		 *
		 * Why do we go to the trouble?  If
		 * someone has an attribute file open,
		 * the inode number should match until
		 * they close it.  Beyond that, we don't
		 * care.
		 */
		dentry = next->s_dentry;
		if (dentry)
			inode = d_inode(dentry);
		if (inode)
			ino = inode->i_ino;
		spin_unlock(&configfs_dirent_lock);
		if (!inode)
			ino = iunique(sb, 2);

		name = configfs_get_name(next);
		len = strlen(name);

		if (!dir_emit(ctx, name, len, ino,
			      fs_umode_to_dtype(next->s_mode)))
			return 0;

		spin_lock(&configfs_dirent_lock);
		list_move(q, p);
		p = q;
		ctx->pos++;
	}
	spin_unlock(&configfs_dirent_lock);
	return 0;
}

static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence)
{
	struct dentry * dentry = file->f_path.dentry;

	switch (whence) {
	case 1:
		offset += file->f_pos;
		fallthrough;
	case 0:
		if (offset >= 0)
			break;
		fallthrough;
	default:
		return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct configfs_dirent *sd = dentry->d_fsdata;
			struct configfs_dirent *cursor = file->private_data;
			struct list_head *p;
			loff_t n = file->f_pos - 2;

			spin_lock(&configfs_dirent_lock);
			list_del(&cursor->s_sibling);
			p = sd->s_children.next;
			while (n && p != &sd->s_children) {
				struct configfs_dirent *next;
				next = list_entry(p, struct configfs_dirent,
						  s_sibling);
				if (next->s_element)
					n--;
				p = p->next;
			}
			list_add_tail(&cursor->s_sibling, p);
			spin_unlock(&configfs_dirent_lock);
		}
	}
	return offset;
}

const struct file_operations configfs_dir_operations = {
	.open		= configfs_dir_open,
	.release	= configfs_dir_close,
	.llseek		= configfs_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= configfs_readdir,
};

/**
 * configfs_register_group - creates a parent-child relation between two groups
 * @parent_group:	parent group
 * @group:		child group
 *
 * Links the groups, creates a dentry for the child and attaches it to the
 * parent's dentry.
 *
 * Return: 0 on success, negative errno code on error
 */
int configfs_register_group(struct config_group *parent_group,
			    struct config_group *group)
{
	struct configfs_subsystem *subsys = parent_group->cg_subsys;
	struct dentry *parent;
	struct configfs_fragment *frag;
	int ret;

	frag = new_fragment();
	if (!frag)
		return -ENOMEM;

	mutex_lock(&subsys->su_mutex);
	link_group(parent_group, group);
	mutex_unlock(&subsys->su_mutex);

	parent = parent_group->cg_item.ci_dentry;

	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
	ret = create_default_group(parent_group, group, frag);
	if (ret)
		goto err_out;

	spin_lock(&configfs_dirent_lock);
	configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
	spin_unlock(&configfs_dirent_lock);
	inode_unlock(d_inode(parent));
	put_fragment(frag);
	return 0;
err_out:
	inode_unlock(d_inode(parent));
	mutex_lock(&subsys->su_mutex);
	unlink_group(group);
	mutex_unlock(&subsys->su_mutex);
	put_fragment(frag);
	return ret;
}
EXPORT_SYMBOL(configfs_register_group);

/**
 * configfs_unregister_group() - unregisters a child group from its parent
 * @group:	child group to be unregistered
 *
 * Undoes configfs_register_group()
 */
void configfs_unregister_group(struct config_group *group)
{
	struct configfs_subsystem *subsys = group->cg_subsys;
	struct dentry *dentry = group->cg_item.ci_dentry;
	struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
	struct configfs_dirent *sd = dentry->d_fsdata;
	struct configfs_fragment *frag = sd->s_frag;

	down_write(&frag->frag_sem);
	frag->frag_dead = true;
	up_write(&frag->frag_sem);

	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
	spin_lock(&configfs_dirent_lock);
	configfs_detach_prep(dentry, NULL);
	spin_unlock(&configfs_dirent_lock);

	configfs_detach_group(&group->cg_item);
	d_inode(dentry)->i_flags |= S_DEAD;
	dont_mount(dentry);
	d_drop(dentry);
	fsnotify_rmdir(d_inode(parent), dentry);
	inode_unlock(d_inode(parent));

	dput(dentry);

	mutex_lock(&subsys->su_mutex);
	unlink_group(group);
	mutex_unlock(&subsys->su_mutex);
}
EXPORT_SYMBOL(configfs_unregister_group);

/**
 * configfs_register_default_group() - allocates and registers a child group
 * @parent_group:	parent group
 * @name:		child group name
 * @item_type:		child item type description
 *
 * Boilerplate to allocate and register a child group with its parent.  We
 * need kzalloc'ed memory because the child's default_groups list is
 * initially empty.
 *
 * Return: allocated config group or ERR_PTR() on error
 */
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
				const char *name,
				const struct config_item_type *item_type)
{
	int ret;
	struct config_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);
	config_group_init_type_name(group, name, item_type);

	ret = configfs_register_group(parent_group, group);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}
	return group;
}
EXPORT_SYMBOL(configfs_register_default_group);

/**
 * configfs_unregister_default_group() - unregisters and frees a child group
 * @group:	the group to act on
 */
void configfs_unregister_default_group(struct config_group *group)
{
	configfs_unregister_group(group);
	kfree(group);
}
EXPORT_SYMBOL(configfs_unregister_default_group);
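
/*
 * Hypothetical usage sketch (illustration only; the "stats" name is made up
 * and stats_type is an assumed config_item_type).  A kernel-created child
 * directory can be added under an already registered parent group and torn
 * down again later:
 *
 *	static struct config_group *stats_group;
 *
 *	static int mydev_add_stats_dir(struct config_group *parent)
 *	{
 *		stats_group = configfs_register_default_group(parent, "stats",
 *							      &stats_type);
 *		return PTR_ERR_OR_ZERO(stats_group);
 *	}
 *
 *	static void mydev_remove_stats_dir(void)
 *	{
 *		configfs_unregister_default_group(stats_group);
 *	}
 */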

int configfs_register_subsystem(struct configfs_subsystem *subsys)
{
	int err;
	struct config_group *group = &subsys->su_group;
	struct dentry *dentry;
	struct dentry *root;
	struct configfs_dirent *sd;
	struct configfs_fragment *frag;

	frag = new_fragment();
	if (!frag)
		return -ENOMEM;

	root = configfs_pin_fs();
	if (IS_ERR(root)) {
		put_fragment(frag);
		return PTR_ERR(root);
	}

	if (!group->cg_item.ci_name)
		group->cg_item.ci_name = group->cg_item.ci_namebuf;

	sd = root->d_fsdata;
	mutex_lock(&configfs_subsystem_mutex);
	link_group(to_config_group(sd->s_element), group);
	mutex_unlock(&configfs_subsystem_mutex);

	inode_lock_nested(d_inode(root), I_MUTEX_PARENT);

	err = -ENOMEM;
	dentry = d_alloc_name(root, group->cg_item.ci_name);
	if (dentry) {
		d_add(dentry, NULL);

		err = configfs_attach_group(sd->s_element, &group->cg_item,
					    dentry, frag);
		if (err) {
			BUG_ON(d_inode(dentry));
			d_drop(dentry);
			dput(dentry);
		} else {
			spin_lock(&configfs_dirent_lock);
			configfs_dir_set_ready(dentry->d_fsdata);
			spin_unlock(&configfs_dirent_lock);
		}
	}

	inode_unlock(d_inode(root));

	if (err) {
		mutex_lock(&configfs_subsystem_mutex);
		unlink_group(group);
		mutex_unlock(&configfs_subsystem_mutex);
		configfs_release_fs();
	}
	put_fragment(frag);

	return err;
}

void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
{
	struct config_group *group = &subsys->su_group;
	struct dentry *dentry = group->cg_item.ci_dentry;
	struct dentry *root = dentry->d_sb->s_root;
	struct configfs_dirent *sd = dentry->d_fsdata;
	struct configfs_fragment *frag = sd->s_frag;

	if (dentry->d_parent != root) {
		pr_err("Tried to unregister non-subsystem!\n");
		return;
	}

	down_write(&frag->frag_sem);
	frag->frag_dead = true;
	up_write(&frag->frag_sem);

	inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
	inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
	mutex_lock(&configfs_symlink_mutex);
	spin_lock(&configfs_dirent_lock);
	if (configfs_detach_prep(dentry, NULL)) {
		pr_err("Tried to unregister non-empty subsystem!\n");
	}
	spin_unlock(&configfs_dirent_lock);
	mutex_unlock(&configfs_symlink_mutex);
	configfs_detach_group(&group->cg_item);
	d_inode(dentry)->i_flags |= S_DEAD;
	dont_mount(dentry);
	inode_unlock(d_inode(dentry));

	d_drop(dentry);
	fsnotify_rmdir(d_inode(root), dentry);

	inode_unlock(d_inode(root));

	dput(dentry);

	mutex_lock(&configfs_subsystem_mutex);
	unlink_group(group);
	mutex_unlock(&configfs_subsystem_mutex);
	configfs_release_fs();
}

EXPORT_SYMBOL(configfs_register_subsystem);
EXPORT_SYMBOL(configfs_unregister_subsystem);
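
/*
 * Hypothetical end-to-end sketch (illustration only; the "widgets" names are
 * invented and widgets_type is an assumed config_item_type).  A client
 * module typically registers one subsystem at init time and unregisters it
 * on exit, which creates and removes the top-level
 * /sys/kernel/config/widgets directory:
 *
 *	static struct configfs_subsystem widgets_subsys = {
 *		.su_group = {
 *			.cg_item = {
 *				.ci_namebuf	= "widgets",
 *				.ci_type	= &widgets_type,
 *			},
 *		},
 *	};
 *
 *	static int __init widgets_init(void)
 *	{
 *		config_group_init(&widgets_subsys.su_group);
 *		mutex_init(&widgets_subsys.su_mutex);
 *		return configfs_register_subsystem(&widgets_subsys);
 *	}
 *
 *	static void __exit widgets_exit(void)
 *	{
 *		configfs_unregister_subsystem(&widgets_subsys);
 *	}
 *
 * widgets_type would supply ct_group_ops with make_item()/make_group() so
 * that userspace mkdir(2) can create items under the subsystem.
 */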