// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sysfs.h>

#include "base.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
/* Default for the deprecated sysfs layout: on iff _V2 is configured. */
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/*
 * Parse the "sysfs.deprecated=<int>" early boot parameter into
 * sysfs_deprecated.  Returns 0 on success or the kstrtol() error.
 */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support.
 */
/* Consumers waiting for their suppliers to show up; guarded by wfs_lock. */
static LIST_HEAD(wait_for_suppliers);
static DEFINE_MUTEX(wfs_lock);
/* Suppliers whose sync_state() call is deferred; see defer_sync_state_count. */
static LIST_HEAD(deferred_sync);
/*
 * Starts at 1 so that sync_state() calls stay deferred until the matching
 * resume runs at late_initcall time (see sync_state_resume_initcall below).
 */
static unsigned int defer_sync_state_count = 1;
static unsigned int defer_fw_devlink_count;
static LIST_HEAD(deferred_fw_devlink);
static DEFINE_MUTEX(defer_fw_devlink_lock);
static bool fw_devlink_is_permissive(void);

#ifdef CONFIG_SRCU
/*
 * Writers serialize on device_links_lock; readers use SRCU so that link
 * teardown can wait for (or defer past) in-flight readers.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

/* Returns the SRCU index to pass back to device_links_read_unlock(). */
int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}
#else /* !CONFIG_SRCU */
/* Without SRCU, fall back to a plain rwsem for both readers and writers. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

/* Always returns 0; the "index" is only meaningful in the SRCU variant. */
int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used)
{
	up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
	return lockdep_is_held(&device_links_lock);
}
#endif
#endif /* !CONFIG_SRCU */

/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc).
 * Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
	struct device_link *link;
	int ret;

	if (dev == target)
		return 1;

	/* Recurse into children first; a non-zero result is propagated up. */
	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/*
		 * Managed SYNC_STATE_ONLY links do not count as dependencies
		 * for reordering/cycle purposes (checked by exact flag match).
		 */
		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Derive the initial state of @link from the driver-binding status of the
 * supplier and consumer devices.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}

/*
 * Move @dev, all of its children and all of its (non-SYNC_STATE_ONLY)
 * consumers to the ends of devices_kset and dpm_list, recursively.
 * Callback-compatible with device_for_each_child(); always returns 0.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

#define to_devlink(dev)	container_of((dev), struct device_link, link_dev)

/* sysfs "status" attribute: human-readable name of the link state. */
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	const char *output;

	switch (to_devlink(dev)->status) {
	case DL_STATE_NONE:
		output = "not tracked";
		break;
	case DL_STATE_DORMANT:
		output = "dormant";
		break;
	case DL_STATE_AVAILABLE:
		output = "available";
		break;
	case DL_STATE_CONSUMER_PROBE:
		output = "consumer probing";
		break;
	case DL_STATE_ACTIVE:
		output = "active";
		break;
	case DL_STATE_SUPPLIER_UNBIND:
		output = "supplier unbinding";
		break;
	default:
		output = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);

/* sysfs "auto_remove_on": which unbind event deletes the link, if any. */
static ssize_t auto_remove_on_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);
	const char *output;

	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		output = "supplier unbind";
	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
		output = "consumer unbind";
	else
		output = "never";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);

/* sysfs "runtime_pm": 1 if the link participates in runtime PM. */
static ssize_t runtime_pm_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);

/* sysfs "sync_state_only": 1 if the link only affects sync_state(). */
static ssize_t sync_state_only_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n",
			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);

static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);

/*
 * Final teardown of a link: drop any runtime-PM references it still holds on
 * the supplier (down to the initial count of 1), release both device
 * references and free the memory.
 */
static void device_link_free(struct device_link *link)
{
	while (refcount_dec_not_one(&link->rpm_active))
		pm_runtime_put(link->supplier);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

#ifdef CONFIG_SRCU
static void __device_link_free_srcu(struct rcu_head *rhead)
{
	device_link_free(container_of(rhead, struct device_link, rcu_head));
}

/*
 * Class release callback: defer the actual free past an SRCU grace period so
 * that lockless readers traversing the link lists cannot see freed memory.
 */
static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else
/* No SRCU: readers hold the rwsem, so freeing immediately is safe. */
static void devlink_dev_release(struct device *dev)
{
	device_link_free(to_devlink(dev));
}
#endif

static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};

/*
 * Class-interface callback run for every new devlink device: create the
 * "supplier"/"consumer" symlinks inside the link's own sysfs directory and
 * the "supplier:<name>"/"consumer:<name>" back-links in the two endpoint
 * directories.  All four links are unwound on failure.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/* "supplier:" is the longer of the two prefixes; +1 for the NUL. */
	len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s", dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s", dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s", dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}

/*
 * Class-interface callback run when a devlink device goes away: remove the
 * four symlinks created by devlink_add_symlinks().  A failed allocation here
 * can only be reported, not recovered from.
 */
static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	snprintf(buf, len, "supplier:%s", dev_name(sup));
	sysfs_remove_link(&con->kobj, buf);
	snprintf(buf, len, "consumer:%s", dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}

static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};

/* Register the devlink class and its symlink-maintaining interface. */
static int __init devlink_class_init(void)
{
	int ret;

	ret = class_register(&devlink_class);
	if (ret)
		return ret;

	ret = class_interface_register(&devlink_class_intf);
	if (ret)
		class_unregister(&devlink_class);

	return ret;
}
postcore_initcall(devlink_class_init);

/* Flags that imply the driver core manages the link's life cycle. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY)

/* Full set of flags callers may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active metastate and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then).
 * The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/*
	 * Reject: missing endpoints, unknown flags, STATELESS combined with
	 * any managed-link flag, SYNC_STATE_ONLY combined with anything else,
	 * or AUTOPROBE_CONSUMER together with either AUTOREMOVE flag.
	 */
	if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     flags != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			/* get_sync() took a reference even on failure. */
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between the same pair, upgrading its flags. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			/*
			 * A SYNC_STATE_ONLY link becoming stateless now
			 * constrains ordering, so the lists must be reordered.
			 */
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	/* No existing link: create a new one. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s--%s",
		     dev_name(supplier), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(consumer);
		put_device(supplier);
		kfree(link);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Drop the RPM_ACTIVE reference taken above if no link was created. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);

/**
 * device_link_wait_for_supplier - Add device to wait_for_suppliers list
 * @consumer: Consumer device
 *
 * Marks the @consumer device as waiting for suppliers to become available by
 * adding it to the wait_for_suppliers list. The consumer device will never be
 * probed until it's removed from the wait_for_suppliers list.
 *
 * The caller is responsible for adding the links to the supplier devices once
 * they are available and removing the @consumer device from the
 * wait_for_suppliers list once links to all the suppliers have been created.
 *
 * This function is NOT meant to be called from the probe function of the
 * consumer but rather from code that creates/adds the consumer device.
 */
static void device_link_wait_for_supplier(struct device *consumer,
					  bool need_for_probe)
{
	mutex_lock(&wfs_lock);
	list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
	/* need_for_probe decides whether probing is actually blocked. */
	consumer->links.need_for_probe = need_for_probe;
	mutex_unlock(&wfs_lock);
}

/* Missing supplier is mandatory: block probing until it appears. */
static void device_link_wait_for_mandatory_supplier(struct device *consumer)
{
	device_link_wait_for_supplier(consumer, true);
}

/* Missing supplier is optional: track it, but don't block probing. */
static void device_link_wait_for_optional_supplier(struct device *consumer)
{
	device_link_wait_for_supplier(consumer, false);
}

/**
 * device_link_add_missing_supplier_links - Add links from consumer devices to
 *					    supplier devices, leaving any
 *					    consumer with inactive suppliers on
 *					    the wait_for_suppliers list
 *
 * Loops through all consumers waiting on suppliers and tries to add all their
 * supplier links. If that succeeds, the consumer device is removed from
 * wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers
 * list. Devices left on the wait_for_suppliers list will not be probed.
 *
 * The fwnode add_links callback is expected to return 0 if it has found and
 * added all the supplier links for the consumer device. It should return an
 * error if it isn't able to do so.
 *
 * The caller of device_link_wait_for_supplier() is expected to call this once
 * it's aware of potential suppliers becoming available.
 */
static void device_link_add_missing_supplier_links(void)
{
	struct device *dev, *tmp;

	mutex_lock(&wfs_lock);
	list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
				 links.needs_suppliers) {
		int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
		if (!ret)
			list_del_init(&dev->links.needs_suppliers);
		else if (ret != -ENODEV || fw_devlink_is_permissive())
			/* Keep it on the list but stop blocking its probe. */
			dev->links.need_for_probe = false;
	}
	mutex_unlock(&wfs_lock);
}

#ifdef CONFIG_SRCU
/*
 * kref release callback: detach the link from both endpoint lists (RCU-safe
 * for lockless readers) and unregister its device; the memory is freed later
 * by devlink_dev_release() after an SRCU grace period.
 */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
	device_unregister(&link->link_dev);
}
#else /* !CONFIG_SRCU */
/* Non-SRCU variant: plain list_del; note it logs at info level, not debug. */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_info(link->consumer, "Dropping the link to %s\n",
		 dev_name(link->supplier));

	pm_runtime_drop_link(link);

	list_del(&link->s_node);
	list_del(&link->c_node);
	device_unregister(&link->link_dev);
}
#endif /* !CONFIG_SRCU */

/*
 * Drop one caller reference from a STATELESS link; dropping references of
 * managed links this way is a caller bug and only warns.
 */
static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}

/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM. If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	/* Only the first matching link is dropped per call. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);

/*
 * Roll back @dev's supplier links from "consumer probe" after a failed or
 * deferred probe: back to AVAILABLE if the supplier is bound, otherwise to
 * DORMANT (expected only for SYNC_STATE_ONLY links, hence the WARN).
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers. Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers.
 * This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&wfs_lock);
	if (!list_empty(&dev->links.needs_suppliers) &&
	    dev->links.need_for_probe) {
		mutex_unlock(&wfs_lock);
		return -EPROBE_DEFER;
	}
	mutex_unlock(&wfs_lock);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * SYNC_STATE_ONLY links never gate probing; any other link
		 * whose supplier isn't available defers the probe.
		 */
		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
			device_links_missing_supplier(dev);
			ret = -EPROBE_DEFER;
			break;
		}
		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
	}
	dev->links.status = DL_DEV_PROBING;

	device_links_write_unlock();
	return ret;
}

/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state() callback when the device links write lock
 * isn't held. This allows the sync_state() execution flow to use device links
 * APIs. The caller must ensure this function is called with
 * device_links_write_lock() held.
 *
 * This function does a get_device() to make sure the device is not freed while
 * on this list.
 *
 * So the caller must also ensure that device_links_flush_sync_list() is called
 * as soon as the caller releases device_links_write_lock().
 * This is necessary
 * to make sure the sync_state() is called in a timely fashion and the
 * put_device() is called on this device.
 */
static void __device_links_queue_sync_state(struct device *dev,
					    struct list_head *list)
{
	struct device_link *link;

	if (!dev_has_sync_state(dev))
		return;
	if (dev->state_synced)
		return;

	/* Only queue once every managed consumer link has become active. */
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;
		if (link->status != DL_STATE_ACTIVE)
			return;
	}

	/*
	 * Set the flag here to avoid adding the same device to a list more
	 * than once. This can happen if new consumers get added to the device
	 * and probed before the list is flushed.
	 */
	dev->state_synced = true;

	/* defer_hook must be free here; it is shared with deferred_sync. */
	if (WARN_ON(!list_empty(&dev->links.defer_hook)))
		return;

	get_device(dev);
	list_add_tail(&dev->links.defer_hook, list);
}

/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it. This
 * function is used in conjunction with __device_links_queue_sync_state(). The
 * @dont_lock_dev parameter is useful when this function is called from a
 * context where a device lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
					 struct device *dont_lock_dev)
{
	struct device *dev, *tmp;

	list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
		list_del_init(&dev->links.defer_hook);

		if (dev != dont_lock_dev)
			device_lock(dev);

		/*
		 * Bus sync_state takes precedence over the driver's.
		 * NOTE(review): dev->bus is dereferenced unguarded here while
		 * dev->driver is NULL-checked — presumably every queued device
		 * has a bus; confirm against dev_has_sync_state() users.
		 */
		if (dev->bus->sync_state)
			dev->bus->sync_state(dev);
		else if (dev->driver && dev->driver->sync_state)
			dev->driver->sync_state(dev);

		if (dev != dont_lock_dev)
			device_unlock(dev);

		/* Balances the get_device() from queueing. */
		put_device(dev);
	}
}

/* Increase the sync_state() deferral depth by one. */
void device_links_supplier_sync_state_pause(void)
{
	device_links_write_lock();
	defer_sync_state_count++;
	device_links_write_unlock();
}

/*
 * Decrease the sync_state() deferral depth; when it reaches zero, queue and
 * then flush all suppliers that were deferred in the meantime.
 */
void device_links_supplier_sync_state_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(sync_list);

	device_links_write_lock();
	if (!defer_sync_state_count) {
		WARN(true, "Unmatched sync_state pause/resume!");
		goto out;
	}
	defer_sync_state_count--;
	if (defer_sync_state_count)
		goto out;

	list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
		/*
		 * Delete from deferred_sync list before queuing it to
		 * sync_list because defer_hook is used for both lists.
		 */
		list_del_init(&dev->links.defer_hook);
		__device_links_queue_sync_state(dev, &sync_list);
	}
out:
	device_links_write_unlock();

	/* Must run after dropping the write lock; see the queueing helper. */
	device_links_flush_sync_list(&sync_list, NULL);
}

/*
 * Pairs with the initial defer_sync_state_count = 1: releases the boot-time
 * deferral once late initcalls run.
 */
static int sync_state_resume_initcall(void)
{
	device_links_supplier_sync_state_resume();
	return 0;
}
late_initcall(sync_state_resume_initcall);

/* Park @sup on deferred_sync unless it is already queued somewhere. */
static void __device_links_supplier_defer_sync(struct device *sup)
{
	if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
		list_add_tail(&sup->links.defer_hook, &deferred_sync);
}

/* Turn a managed link back into an untracked one and drop its reference. */
static void device_link_drop_managed(struct device_link *link)
{
	link->flags &= ~DL_FLAG_MANAGED;
	WRITE_ONCE(link->status, DL_STATE_NONE);
	kref_put(&link->kref, __device_link_del);
}

/* sysfs: 1 while the device's probe is blocked waiting for a supplier. */
static ssize_t waiting_for_supplier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	bool val;

	device_lock(dev);
	mutex_lock(&wfs_lock);
	val = !list_empty(&dev->links.needs_suppliers)
	      && dev->links.need_for_probe;
	mutex_unlock(&wfs_lock);
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
	struct device_link *link, *ln;
	LIST_HEAD(sync_list);

	/*
	 * If a device probes successfully, it's expected to have created all
	 * the device links it needs to or make new device links as it needs
	 * them. So, it no longer needs to wait on any suppliers.
	 */
	mutex_lock(&wfs_lock);
	list_del_init(&dev->links.needs_suppliers);
	mutex_unlock(&wfs_lock);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		/*
		 * Links created during consumer probe may be in the "consumer
		 * probe" state to start with if the supplier is still probing
		 * when they are created and they may become "active" if the
		 * consumer probe returns first. Skip them here.
		 */
		if (link->status == DL_STATE_CONSUMER_PROBE ||
		    link->status == DL_STATE_ACTIVE)
			continue;

		WARN_ON(link->status != DL_STATE_DORMANT);
		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

		/* The supplier is now ready, so retry deferred consumers. */
		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
			driver_deferred_probe_add(link->consumer);
	}

	if (defer_sync_state_count)
		__device_links_supplier_defer_sync(dev);
	else
		__device_links_queue_sync_state(dev, &sync_list);

	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
		struct device *supplier;

		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		supplier = link->supplier;
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
			/*
			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
			 * safe to drop the managed link completely.
			 */
			device_link_drop_managed(link);
		} else {
			WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
			WRITE_ONCE(link->status, DL_STATE_ACTIVE);
		}

		/*
		 * This needs to be done even for the deleted
		 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
		 * device link that was preventing the supplier from getting a
		 * sync_state() call.
		 */
		if (defer_sync_state_count)
			__device_links_supplier_defer_sync(supplier);
		else
			__device_links_queue_sync_state(supplier, &sync_list);
	}

	dev->links.status = DL_DEV_DRIVER_BOUND;

	device_links_write_unlock();

	/* sync_state() callbacks run without the device links lock held. */
	device_links_flush_sync_list(&sync_list, dev);
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
1177 */ 1178 static void __device_links_no_driver(struct device *dev) 1179 { 1180 struct device_link *link, *ln; 1181 1182 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 1183 if (!(link->flags & DL_FLAG_MANAGED)) 1184 continue; 1185 1186 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { 1187 device_link_drop_managed(link); 1188 continue; 1189 } 1190 1191 if (link->status != DL_STATE_CONSUMER_PROBE && 1192 link->status != DL_STATE_ACTIVE) 1193 continue; 1194 1195 if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { 1196 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 1197 } else { 1198 WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); 1199 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1200 } 1201 } 1202 1203 dev->links.status = DL_DEV_NO_DRIVER; 1204 } 1205 1206 /** 1207 * device_links_no_driver - Update links after failing driver probe. 1208 * @dev: Device whose driver has just failed to probe. 1209 * 1210 * Clean up leftover links to consumers for @dev and invoke 1211 * %__device_links_no_driver() to update links to suppliers for it as 1212 * appropriate. 1213 * 1214 * Links without the DL_FLAG_MANAGED flag set are ignored. 1215 */ 1216 void device_links_no_driver(struct device *dev) 1217 { 1218 struct device_link *link; 1219 1220 device_links_write_lock(); 1221 1222 list_for_each_entry(link, &dev->links.consumers, s_node) { 1223 if (!(link->flags & DL_FLAG_MANAGED)) 1224 continue; 1225 1226 /* 1227 * The probe has failed, so if the status of the link is 1228 * "consumer probe" or "active", it must have been added by 1229 * a probing consumer while this device was still probing. 1230 * Change its state to "dormant", as it represents a valid 1231 * relationship, but it is not functionally meaningful. 
1232 */ 1233 if (link->status == DL_STATE_CONSUMER_PROBE || 1234 link->status == DL_STATE_ACTIVE) 1235 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1236 } 1237 1238 __device_links_no_driver(dev); 1239 1240 device_links_write_unlock(); 1241 } 1242 1243 /** 1244 * device_links_driver_cleanup - Update links after driver removal. 1245 * @dev: Device whose driver has just gone away. 1246 * 1247 * Update links to consumers for @dev by changing their status to "dormant" and 1248 * invoke %__device_links_no_driver() to update links to suppliers for it as 1249 * appropriate. 1250 * 1251 * Links without the DL_FLAG_MANAGED flag set are ignored. 1252 */ 1253 void device_links_driver_cleanup(struct device *dev) 1254 { 1255 struct device_link *link, *ln; 1256 1257 device_links_write_lock(); 1258 1259 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { 1260 if (!(link->flags & DL_FLAG_MANAGED)) 1261 continue; 1262 1263 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); 1264 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); 1265 1266 /* 1267 * autoremove the links between this @dev and its consumer 1268 * devices that are not active, i.e. where the link state 1269 * has moved to DL_STATE_SUPPLIER_UNBIND. 1270 */ 1271 if (link->status == DL_STATE_SUPPLIER_UNBIND && 1272 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 1273 device_link_drop_managed(link); 1274 1275 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1276 } 1277 1278 list_del_init(&dev->links.defer_hook); 1279 __device_links_no_driver(dev); 1280 1281 device_links_write_unlock(); 1282 } 1283 1284 /** 1285 * device_links_busy - Check if there are any busy links to consumers. 1286 * @dev: Device to check. 1287 * 1288 * Check each consumer of the device and return 'true' if its link's status 1289 * is one of "consumer probe" or "active" (meaning that the given consumer is 1290 * probing right now or its driver is present). 
Otherwise, change the link 1291 * state to "supplier unbind" to prevent the consumer from being probed 1292 * successfully going forward. 1293 * 1294 * Return 'false' if there are no probing or active consumers. 1295 * 1296 * Links without the DL_FLAG_MANAGED flag set are ignored. 1297 */ 1298 bool device_links_busy(struct device *dev) 1299 { 1300 struct device_link *link; 1301 bool ret = false; 1302 1303 device_links_write_lock(); 1304 1305 list_for_each_entry(link, &dev->links.consumers, s_node) { 1306 if (!(link->flags & DL_FLAG_MANAGED)) 1307 continue; 1308 1309 if (link->status == DL_STATE_CONSUMER_PROBE 1310 || link->status == DL_STATE_ACTIVE) { 1311 ret = true; 1312 break; 1313 } 1314 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 1315 } 1316 1317 dev->links.status = DL_DEV_UNBINDING; 1318 1319 device_links_write_unlock(); 1320 return ret; 1321 } 1322 1323 /** 1324 * device_links_unbind_consumers - Force unbind consumers of the given device. 1325 * @dev: Device to unbind the consumers of. 1326 * 1327 * Walk the list of links to consumers for @dev and if any of them is in the 1328 * "consumer probe" state, wait for all device probes in progress to complete 1329 * and start over. 1330 * 1331 * If that's not the case, change the status of the link to "supplier unbind" 1332 * and check if the link was in the "active" state. If so, force the consumer 1333 * driver to unbind and start over (the consumer will not re-probe as we have 1334 * changed the state of the link already). 1335 * 1336 * Links without the DL_FLAG_MANAGED flag set are ignored. 
 */
void device_links_unbind_consumers(struct device *dev)
{
	struct device_link *link;

 start:
	device_links_write_lock();

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		enum device_link_state status;

		if (!(link->flags & DL_FLAG_MANAGED) ||
		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
			continue;

		status = link->status;
		if (status == DL_STATE_CONSUMER_PROBE) {
			device_links_write_unlock();

			/* Probing may change link states; rescan from scratch. */
			wait_for_device_probe();
			goto start;
		}
		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
		if (status == DL_STATE_ACTIVE) {
			struct device *consumer = link->consumer;

			/* Keep the consumer alive across the unlocked unbind. */
			get_device(consumer);

			device_links_write_unlock();

			device_release_driver_internal(consumer, NULL,
						       consumer->parent);
			put_device(consumer);
			goto start;
		}
	}

	device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
	struct device_link *link, *ln;

	/* Device-link devices manage their own link lifetimes. */
	if (dev->class == &devlink_class)
		return;

	mutex_lock(&wfs_lock);
	list_del(&dev->links.needs_suppliers);
	mutex_unlock(&wfs_lock);

	/*
	 * Delete all of the remaining links from this device to any other
	 * devices (either consumers or suppliers).
	 */
	device_links_write_lock();

	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
		WARN_ON(link->status == DL_STATE_ACTIVE);
		__device_link_del(&link->kref);
	}

	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
		WARN_ON(link->status != DL_STATE_DORMANT &&
			link->status != DL_STATE_NONE);
		__device_link_del(&link->kref);
	}

	device_links_write_unlock();
}

/* Default fw_devlink policy is "permissive"; overridable on the command line. */
static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
static int __init fw_devlink_setup(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp(arg, "off") == 0) {
		fw_devlink_flags = 0;
	} else if (strcmp(arg, "permissive") == 0) {
		fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
	} else if (strcmp(arg, "on") == 0) {
		fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
	} else if (strcmp(arg, "rpm") == 0) {
		fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
				   DL_FLAG_PM_RUNTIME;
	}
	/* Unknown values silently keep the default. */
	return 0;
}
early_param("fw_devlink", fw_devlink_setup);

u32 fw_devlink_get_flags(void)
{
	return fw_devlink_flags;
}

static bool fw_devlink_is_permissive(void)
{
	return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
}

/* Parse @dev's fwnode and create device links to its suppliers, if enabled. */
static void fw_devlink_link_device(struct device *dev)
{
	int fw_ret;

	if (!fw_devlink_flags)
		return;

	mutex_lock(&defer_fw_devlink_lock);
	if (!defer_fw_devlink_count)
		device_link_add_missing_supplier_links();

	/*
	 * The device's fwnode not having add_links() doesn't affect if other
	 * consumers can find this device as a supplier. So, this check is
	 * intentionally placed after device_link_add_missing_supplier_links().
	 */
	if (!fwnode_has_op(dev->fwnode, add_links))
		goto out;

	/*
	 * If fw_devlink is being deferred, assume all devices have mandatory
	 * suppliers they need to link to later. Then, when the fw_devlink is
	 * resumed, all these devices will get a chance to try and link to any
	 * suppliers they have.
	 */
	if (!defer_fw_devlink_count) {
		fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
		/* In permissive mode a missing supplier is only optional. */
		if (fw_ret == -ENODEV && fw_devlink_is_permissive())
			fw_ret = -EAGAIN;
	} else {
		fw_ret = -ENODEV;
		/*
		 * defer_hook is not used to add device to deferred_sync list
		 * until device is bound. Since deferred fw devlink also blocks
		 * probing, same list hook can be used for deferred_fw_devlink.
		 */
		list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
	}

	if (fw_ret == -ENODEV)
		device_link_wait_for_mandatory_supplier(dev);
	else if (fw_ret)
		device_link_wait_for_optional_supplier(dev);

out:
	mutex_unlock(&defer_fw_devlink_lock);
}

/**
 * fw_devlink_pause - Pause parsing of fwnode to create device links
 *
 * Calling this function defers any fwnode parsing to create device links until
 * fw_devlink_resume() is called. Both these functions are ref counted and the
 * caller needs to match the calls.
 *
 * While fw_devlink is paused:
 * - Any device that is added won't have its fwnode parsed to create device
 *   links.
 * - The probe of the device will also be deferred during this period.
 * - Any devices that were already added, but waiting for suppliers won't be
 *   able to link to newly added devices.
 *
 * Once fw_devlink_resume():
 * - All the fwnodes that were not parsed will be parsed.
 * - All the devices that were deferred probing will be reattempted if they
 *   aren't waiting for any more suppliers.
 *
 * This pair of functions, is mainly meant to optimize the parsing of fwnodes
 * when a lot of devices that need to link to each other are added in a short
 * interval of time. For example, adding all the top level devices in a system.
 *
 * For example, if N devices are added and:
 * - All the consumers are added before their suppliers
 * - All the suppliers of the N devices are part of the N devices
 *
 * Then:
 *
 * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device
 *   will only need one parsing of its fwnode because it is guaranteed to find
 *   all the supplier devices already registered and ready to link to. It won't
 *   have to do another pass later to find one or more suppliers it couldn't
 *   find in the first parse of the fwnode. So, we'll only need O(N) fwnode
 *   parses.
 *
 * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would
 *   end up doing O(N^2) parses of fwnodes because every device that's added is
 *   guaranteed to trigger a parse of the fwnode of every device added before
 *   it. This O(N^2) parse is made worse by the fact that when a fwnode of a
 *   device is parsed, all its descendant devices might need to have their
 *   fwnodes parsed too (even if the devices themselves aren't added).
 */
void fw_devlink_pause(void)
{
	mutex_lock(&defer_fw_devlink_lock);
	defer_fw_devlink_count++;
	mutex_unlock(&defer_fw_devlink_lock);
}

/** fw_devlink_resume - Resume parsing of fwnode to create device links
 *
 * This function is used in conjunction with fw_devlink_pause() and is ref
 * counted. See documentation for fw_devlink_pause() for more details.
 */
void fw_devlink_resume(void)
{
	struct device *dev, *tmp;
	LIST_HEAD(probe_list);

	mutex_lock(&defer_fw_devlink_lock);
	if (!defer_fw_devlink_count) {
		WARN(true, "Unmatched fw_devlink pause/resume!");
		goto out;
	}

	defer_fw_devlink_count--;
	if (defer_fw_devlink_count)
		goto out;

	device_link_add_missing_supplier_links();
	list_splice_tail_init(&deferred_fw_devlink, &probe_list);
out:
	mutex_unlock(&defer_fw_devlink_lock);

	/*
	 * bus_probe_device() can cause new devices to get added and they'll
	 * try to grab defer_fw_devlink_lock. So, this needs to be done outside
	 * the defer_fw_devlink_lock.
	 */
	list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
		list_del_init(&dev->links.defer_hook);
		bus_probe_device(dev);
	}
}
/* Device links support end. */

int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
	mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
	mutex_unlock(&device_hotplug_lock);
}

/*
 * Try to grab the hotplug lock from a sysfs handler; if it is contended,
 * back off and restart the syscall instead of blocking, so the writer
 * doesn't deadlock against an in-progress hotplug operation.
 */
int lock_device_hotplug_sysfs(void)
{
	if (mutex_trylock(&device_hotplug_lock))
		return 0;

	/* Avoid busy looping (5 ms of sleep should do). */
	msleep(5);
	return restart_syscall();
}

#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif

/* Fan out add/remove notifications to ACPI, software nodes and platform hooks. */
static int
device_platform_notify(struct device *dev, enum kobject_action action)
{
	int ret;

	ret = acpi_platform_notify(dev, action);
	if (ret)
		return ret;

	ret = software_node_notify(dev, action);
	if (ret)
		return ret;

	if (platform_notify && action == KOBJ_ADD)
		platform_notify(dev);
	else if (platform_notify_remove && action == KOBJ_REMOVE)
		platform_notify_remove(dev);
	return 0;
}

/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device.  If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to.  If it is not attached to a bus either, an empty
 * string will be returned.
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/* dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it.  dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ? drv->name :
			(dev->bus ? dev->bus->name :
			(dev->class ? dev->class->name : ""));
}
EXPORT_SYMBOL(dev_driver_string);

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/* kobject sysfs show(): dispatch to the device_attribute's show callback. */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	/* sysfs attributes are limited to one page; flag buggy callbacks. */
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
				dev_attr->show);
	}
	return ret;
}

/* kobject sysfs store(): dispatch to the device_attribute's store callback. */
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	unsigned long new;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	/* NB: prints in hex (%lx), matching the historical ABI. */
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);

ssize_t device_store_int(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	long new;

	ret = kstrtol(buf, 0, &new);
	if (ret)
		return ret;

	/* Parsed as long, stored as int: reject out-of-range values. */
	if (new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);

/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	/* Release precedence: device, then type, then class. */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}

static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};


/* Only emit uevents for devices that belong to a bus or a class. */
static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);
		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}

/* Populate the uevent environment for @kobj's device. */
static int dev_uevent(struct kset *kset, struct kobject *kobj,
		      struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};

/* sysfs "uevent" read: replay the variables a KOBJ_ADD event would carry. */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}

/* sysfs "uevent" write: inject a synthetic uevent (e.g. "add", "change"). */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);

static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

/* sysfs "online" write: hot-(un)plug the device under the hotplug lock. */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

/* devres payload: one group or a NULL-terminated array of groups. */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres release callback for a single managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres release callback for a managed array of attribute groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}

/**
 * devm_device_add_group - given a device, create a managed
 attribute group
 * @dev: The device to create the group for
 * @grp: The attribute group to create
 *
 * This function creates a group for the first time.  It will explicitly
 * warn and error if any of the attribute files being created already exist.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	/* group created; record it so it is removed when @dev goes away */
	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);

/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * This function removes a group of attributes from a device.  The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);

/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev: The device to create the group for
 * @groups: The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.  If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state when this
 * function was called.
 It will explicitly warn and error if any of the
 * attribute files being created already exist.
 *
 * Returns 0 on success or error code from sysfs_create_group on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	/* groups created; record them for automatic removal via devres */
	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);

/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev: The device for the groups to be removed from
 * @groups: NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);

/*
 * Create the class, type and device attribute groups for @dev, plus the
 * optional "online" and "waiting_for_supplier" files.  On failure, undo
 * everything created so far in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive()) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	return 0;

 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}

/* Tear down everything device_add_attrs() may have created. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if
 (class)
		device_remove_groups(dev, class->dev_groups);
}

/* Show the device's dev_t (major:minor) via print_dev_t(). */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	/* list_move_tail() onto devb's entry places deva just before devb */
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	/* list_move() onto devb's entry places deva just after devb */
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		/* warn when the mode and the show/store methods disagree */
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
		     "Attribute %s: write permission without 'store'\n",
		     attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
		     "Attribute %s: read permission without 'show'\n",
		     attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);

/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);

/**
 * device_remove_file_self - remove sysfs attribute file from its own method.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * See kernfs_remove_self() for details.
 */
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr)
{
	if (dev)
		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
	else
		return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);

/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
int device_create_bin_file(struct device *dev,
			   const struct bin_attribute *attr)
{
	int error = -EINVAL;
	if (dev)
		error = sysfs_create_bin_file(&dev->kobj, attr);
	return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);

/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

/* klist get callback: pin the child device while it sits on the list. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

/* klist put callback: drop the reference taken by klist_children_get(). */
static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}

/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields.  In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value.  The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.needs_suppliers);
	INIT_LIST_HEAD(&dev->links.defer_hook);
	dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);

/* Lazily created /sys/devices/virtual, parent for class devs w/o a parent. */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

/* A "glue" directory kobject tied to the class it was created for. */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};

/* Create a glue directory named after @class underneath @parent_kobj. */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir),
 GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}

static DEFINE_MUTEX(gdp_mutex);

/* Pick the sysfs parent kobject for @dev: parent, glue dir, or virtual. */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}

/* True if @kobj is a glue directory belonging to @dev's class. */
static inline bool live_in_glue_dir(struct kobject *kobj,
				    struct device *dev)
{
	if (!kobj || !dev->class ||
	    kobj->kset != &dev->class->p->glue_dirs)
		return false;
	return true;
}

static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}

/*
 * make sure cleaning up dir as the last step, we need to make
 * sure .release handler of kobject is run with holding the
 * global lock
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/**
	 * There is a race condition between removing glue directory
	 * and adding a new
 device under the glue directory.
	 *
	 * CPU1:                        CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                              device_add()
	 *                                get_device_parent()
	 *                                  kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                              kobject_add()
	 *                                kobject_add_internal()
	 *                                  create_dir() // in glue_dir
	 *                                    sysfs_create_dir_ns()
	 *                                      kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                      // sd is freed
	 *                                      kernfs_new_node(sd)
	 *                                        kernfs_get(glue_dir)
	 *                                        kernfs_add_one()
	 *                                        kernfs_put()
	 *
	 * Before CPU1 removes the last child device under the glue dir,
	 * if CPU2 adds a new device under the glue dir, the glue_dir
	 * kobject reference count will be increased to 2 in kobject_get(k).
	 * And CPU2 has already called kernfs_create_dir_ns().  Meanwhile,
	 * CPU1 calls sysfs_remove_dir() and sysfs_put().  This results in
	 * glue_dir->sd being freed.
	 *
	 * Then CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that
	 * kernfs_node for glue_dir is released in CPU1 only when refcount
	 * for glue_dir kobj is 1.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}

/* Create "of_node", "subsystem", "device" and class symlinks for @dev. */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n", error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}

/* Undo device_add_class_symlinks(). */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class ==
 &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}

/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);

/**
 * device_to_dev_kobj - select a /sys/dev/ directory for the device
 * @dev: device
 *
 * By default we select char/ for new entries.  Setting class->dev_kobj
 * to NULL prevents an entry from being created.  class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence of
 * the link.
 */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
	struct kobject *kobj;

	if (dev->class)
		kobj = dev->class->dev_kobj;
	else
		kobj = sysfs_dev_char_kobj;

	return kobj;
}

/* Create the /sys/dev/.../<major>:<minor> symlink pointing at @dev. */
static int device_create_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	int error = 0;
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
	}

	return error;
}

/* Remove the symlink created by device_create_sys_dev_entry(). */
static void device_remove_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		sysfs_remove_link(kobj, devt_str);
	}
}

/* Allocate and initialize dev->p, the driver-core private data. */
static int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children, klist_children_get,
		   klist_children_put);
	INIT_LIST_HEAD(&dev->p->deferred_probe);
	return 0;
}

/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * Do not call this routine or device_register() more than once for
 * any device structure.  The driver model core is not designed to work
 * with devices that get unregistered and then spring back to life.
 * (Among other things, it's very hard to guarantee that all references
 * to the previous incarnation of @dev have been dropped.)
 Allocate
 * and register a fresh new struct device instead.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 *
 * Rule of thumb is: if device_add() succeeds, you should call
 * device_del() when you want to get rid of it.  If device_add() has
 * *not* succeeded, use *only* put_device() to drop the reference
 * count.
 */
int device_add(struct device *dev)
{
	struct device *parent;
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
	struct kobject *glue_dir = NULL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name.  We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	/* subsystems can specify simple device enumeration */
	if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
		dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	kobj = get_device_parent(dev, parent);
	if (IS_ERR(kobj)) {
		error = PTR_ERR(kobj);
		goto parent_error;
	}
	if (kobj)
		dev->kobj.parent = kobj;

	/* use parent numa_node */
	if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer.
 */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error) {
		glue_dir = get_glue_dir(dev);
		goto Error;
	}

	/* notify platform of device entry */
	error = device_platform_notify(dev, KOBJ_ADD);
	if (error)
		goto platform_error;

	error = device_create_file(dev, &dev_attr_uevent);
	if (error)
		goto attrError;

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &dev_attr_dev);
		if (error)
			goto DevAttrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto SysEntryError;

		devtmpfs_create_node(dev);
	}

	/* Notify clients of device addition.  This call must come
	 * after dpm_sysfs_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);

	/*
	 * Check if any of the other devices (consumers) have been waiting for
	 * this device (supplier) to be added so that they can create a device
	 * link to it.
	 *
	 * This needs to happen after device_pm_add() because device_link_add()
	 * requires the supplier be registered before it's called.
	 *
	 * But this also needs to happen before bus_probe_device() to make sure
	 * waiting consumers can link to it before the driver is bound to the
	 * device and the driver sync_state callback is called for this device.
	 */
	if (dev->fwnode && !dev->fwnode->dev) {
		dev->fwnode->dev = dev;
		fw_devlink_link_device(dev);
	}

	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->p->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->mutex);
	}
done:
	put_device(dev);
	return error;
	/* error unwinding: each label undoes the step registered above it */
 SysEntryError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &dev_attr_dev);
 DevAttrError:
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	device_platform_notify(dev, KOBJ_REMOVE);
platform_error:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
	cleanup_glue_dir(dev, glue_dir);
parent_error:
	put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}
EXPORT_SYMBOL_GPL(device_add);

/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system.  The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e.
 you should only call the two helpers separately if you
 * have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);

/**
 * get_device - increment reference count for device.
 * @dev: device.
 *
 * This simply forwards the call to kobject_get(), though
 * we do take care to provide for the case that we get a NULL
 * pointer passed in.
 */
struct device *get_device(struct device *dev)
{
	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);

/**
 * put_device - decrement reference count.
 * @dev: device in question.
 */
void put_device(struct device *dev)
{
	/* might_sleep(); */
	if (dev)
		kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);

/* Mark @dev dead; returns false if it was already marked.  Caller holds
 * the device lock (see comment below).
 */
bool kill_device(struct device *dev)
{
	/*
	 * Require the device lock and set the "dead" flag to guarantee that
	 * the update behavior is consistent with the other bitfields near
	 * it and that we cannot have an asynchronous probe routine trying
	 * to run while we are tearing out the bus/class/sysfs from
	 * underneath the device.
	 */
	lockdep_assert_held(&dev->mutex);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);

/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence.  This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/* mark dead under the device lock so async probe cannot race us */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/* Notify clients of device removal.  This call must come
	 * before dpm_sysfs_remove().
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify(dev, KOBJ_REMOVE);
	device_remove_properties(dev);
	device_links_purge(dev);

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);

/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register().  First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device().  If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above.  Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);

/* Step the klist iterator backwards; NULL when the start is reached. */
static struct device *prev_device(struct klist_iter *i)
{
	struct klist_node *n = klist_prev(i);
	struct device *dev = NULL;
	struct device_private *p;

	if (n) {
		p = to_device_private_parent(n);
		dev = p->device;
	}
	return dev;
}

/* Step the klist iterator forwards; NULL when the end is reached. */
static struct device *next_device(struct klist_iter *i)
{
	struct klist_node *n = klist_next(i);
	struct device *dev = NULL;
	struct device_private *p;

	if (n) {
		p = to_device_private_parent(n);
		dev = p->device;
	}
	return dev;
}

/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a
 * possible device node.
 * Non-default names may need to allocate a memory to compose
 * a name. This memory is returned in tmp and needs to be
 * freed by the caller.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	/* '!' encodes a subdirectory in the device name (e.g. block!sda). */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;
	strreplace(s, '!', '/');
	return *tmp = s;
}

/**
 * device_for_each_child - device child iterator.
 * @parent: parent struct device.
 * @fn: function to be called for each device.
 * @data: data for the callback.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
3235 */ 3236 int device_for_each_child(struct device *parent, void *data, 3237 int (*fn)(struct device *dev, void *data)) 3238 { 3239 struct klist_iter i; 3240 struct device *child; 3241 int error = 0; 3242 3243 if (!parent->p) 3244 return 0; 3245 3246 klist_iter_init(&parent->p->klist_children, &i); 3247 while (!error && (child = next_device(&i))) 3248 error = fn(child, data); 3249 klist_iter_exit(&i); 3250 return error; 3251 } 3252 EXPORT_SYMBOL_GPL(device_for_each_child); 3253 3254 /** 3255 * device_for_each_child_reverse - device child iterator in reversed order. 3256 * @parent: parent struct device. 3257 * @fn: function to be called for each device. 3258 * @data: data for the callback. 3259 * 3260 * Iterate over @parent's child devices, and call @fn for each, 3261 * passing it @data. 3262 * 3263 * We check the return of @fn each time. If it returns anything 3264 * other than 0, we break out and return that value. 3265 */ 3266 int device_for_each_child_reverse(struct device *parent, void *data, 3267 int (*fn)(struct device *dev, void *data)) 3268 { 3269 struct klist_iter i; 3270 struct device *child; 3271 int error = 0; 3272 3273 if (!parent->p) 3274 return 0; 3275 3276 klist_iter_init(&parent->p->klist_children, &i); 3277 while ((child = prev_device(&i)) && !error) 3278 error = fn(child, data); 3279 klist_iter_exit(&i); 3280 return error; 3281 } 3282 EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 3283 3284 /** 3285 * device_find_child - device iterator for locating a particular device. 3286 * @parent: parent struct device 3287 * @match: Callback function to check device 3288 * @data: Data to pass to match function 3289 * 3290 * This is similar to the device_for_each_child() function above, but it 3291 * returns a reference to a device that is 'found' for later use, as 3292 * determined by the @match callback. 3293 * 3294 * The callback should return 0 if the device doesn't match and non-zero 3295 * if it does. 
If the callback returns non-zero and a reference to the 3296 * current device can be obtained, this function will return to the caller 3297 * and not iterate over any more devices. 3298 * 3299 * NOTE: you will need to drop the reference with put_device() after use. 3300 */ 3301 struct device *device_find_child(struct device *parent, void *data, 3302 int (*match)(struct device *dev, void *data)) 3303 { 3304 struct klist_iter i; 3305 struct device *child; 3306 3307 if (!parent) 3308 return NULL; 3309 3310 klist_iter_init(&parent->p->klist_children, &i); 3311 while ((child = next_device(&i))) 3312 if (match(child, data) && get_device(child)) 3313 break; 3314 klist_iter_exit(&i); 3315 return child; 3316 } 3317 EXPORT_SYMBOL_GPL(device_find_child); 3318 3319 /** 3320 * device_find_child_by_name - device iterator for locating a child device. 3321 * @parent: parent struct device 3322 * @name: name of the child device 3323 * 3324 * This is similar to the device_find_child() function above, but it 3325 * returns a reference to a device that has the name @name. 3326 * 3327 * NOTE: you will need to drop the reference with put_device() after use. 
3328 */ 3329 struct device *device_find_child_by_name(struct device *parent, 3330 const char *name) 3331 { 3332 struct klist_iter i; 3333 struct device *child; 3334 3335 if (!parent) 3336 return NULL; 3337 3338 klist_iter_init(&parent->p->klist_children, &i); 3339 while ((child = next_device(&i))) 3340 if (sysfs_streq(dev_name(child), name) && get_device(child)) 3341 break; 3342 klist_iter_exit(&i); 3343 return child; 3344 } 3345 EXPORT_SYMBOL_GPL(device_find_child_by_name); 3346 3347 int __init devices_init(void) 3348 { 3349 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); 3350 if (!devices_kset) 3351 return -ENOMEM; 3352 dev_kobj = kobject_create_and_add("dev", NULL); 3353 if (!dev_kobj) 3354 goto dev_kobj_err; 3355 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); 3356 if (!sysfs_dev_block_kobj) 3357 goto block_kobj_err; 3358 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); 3359 if (!sysfs_dev_char_kobj) 3360 goto char_kobj_err; 3361 3362 return 0; 3363 3364 char_kobj_err: 3365 kobject_put(sysfs_dev_block_kobj); 3366 block_kobj_err: 3367 kobject_put(dev_kobj); 3368 dev_kobj_err: 3369 kset_unregister(devices_kset); 3370 return -ENOMEM; 3371 } 3372 3373 static int device_check_offline(struct device *dev, void *not_used) 3374 { 3375 int ret; 3376 3377 ret = device_for_each_child(dev, NULL, device_check_offline); 3378 if (ret) 3379 return ret; 3380 3381 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; 3382 } 3383 3384 /** 3385 * device_offline - Prepare the device for hot-removal. 3386 * @dev: Device to be put offline. 3387 * 3388 * Execute the device bus type's .offline() callback, if present, to prepare 3389 * the device for a subsequent hot-removal. If that succeeds, the device must 3390 * not be used until either it is removed or its bus type's .online() callback 3391 * is executed. 3392 * 3393 * Call under device_hotplug_lock. 
3394 */ 3395 int device_offline(struct device *dev) 3396 { 3397 int ret; 3398 3399 if (dev->offline_disabled) 3400 return -EPERM; 3401 3402 ret = device_for_each_child(dev, NULL, device_check_offline); 3403 if (ret) 3404 return ret; 3405 3406 device_lock(dev); 3407 if (device_supports_offline(dev)) { 3408 if (dev->offline) { 3409 ret = 1; 3410 } else { 3411 ret = dev->bus->offline(dev); 3412 if (!ret) { 3413 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 3414 dev->offline = true; 3415 } 3416 } 3417 } 3418 device_unlock(dev); 3419 3420 return ret; 3421 } 3422 3423 /** 3424 * device_online - Put the device back online after successful device_offline(). 3425 * @dev: Device to be put back online. 3426 * 3427 * If device_offline() has been successfully executed for @dev, but the device 3428 * has not been removed subsequently, execute its bus type's .online() callback 3429 * to indicate that the device can be used again. 3430 * 3431 * Call under device_hotplug_lock. 3432 */ 3433 int device_online(struct device *dev) 3434 { 3435 int ret = 0; 3436 3437 device_lock(dev); 3438 if (device_supports_offline(dev)) { 3439 if (dev->offline) { 3440 ret = dev->bus->online(dev); 3441 if (!ret) { 3442 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 3443 dev->offline = false; 3444 } 3445 } else { 3446 ret = 1; 3447 } 3448 } 3449 device_unlock(dev); 3450 3451 return ret; 3452 } 3453 3454 struct root_device { 3455 struct device dev; 3456 struct module *owner; 3457 }; 3458 3459 static inline struct root_device *to_root_device(struct device *d) 3460 { 3461 return container_of(d, struct root_device, dev); 3462 } 3463 3464 static void root_device_release(struct device *dev) 3465 { 3466 kfree(to_root_device(dev)); 3467 } 3468 3469 /** 3470 * __root_device_register - allocate and register a root device 3471 * @name: root device name 3472 * @owner: owner module of the root device, usually THIS_MODULE 3473 * 3474 * This function allocates a root device and registers it 3475 * using device_register(). 
In order to free the returned
 * device, use root_device_unregister().
 *
 * Root devices are dummy devices which allow other devices
 * to be grouped under /sys/devices. Use this function to
 * allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}
 *
 * The /sys/devices/{name} directory will also contain a
 * 'module' symlink which points to the @owner directory
 * in sysfs.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: You probably want to use root_device_register().
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		/* Device not yet registered: a plain kfree() is the cleanup. */
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/*
		 * device_register() initialized the kobject even on failure,
		 * so release via put_device(), not kfree().
		 */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);

/**
 * root_device_unregister - unregister and free a root device
 * @dev: device going away
 *
 * This function unregisters and cleans up a device that was created by
 * root_device_register().
 */
void root_device_unregister(struct device *dev)
{
	struct root_device *root = to_root_device(dev);

	if (root->owner)
		sysfs_remove_link(&root->dev.kobj, "module");

	device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);


/* ->release for devices made by device_create(): frees the struct device. */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}

/*
 * Common worker for device_create() and device_create_with_groups():
 * allocate, initialize, name and add a class device.  On any failure the
 * half-constructed device is released via put_device() and an ERR_PTR is
 * returned.
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
			   dev_t devt, void *drvdata,
			   const struct attribute_group **groups,
			   const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	/* put_device(NULL) is a no-op, so this covers the !dev path too. */
	put_device(dev);
	return ERR_PTR(retval);
}

/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes. A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create(struct class *class, struct device *parent,
			     dev_t devt, void *drvdata, const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	/* Thin varargs wrapper: no extra attribute groups. */
	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create);

/**
 * device_create_with_groups - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @groups: NULL-terminated list of attribute groups to be created
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes. A struct device
 * will be created in sysfs, registered to the specified class.
 * Additional attributes specified in the groups parameter will also
 * be created automatically.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create_with_groups(struct class *class,
					 struct device *parent, dev_t devt,
					 void *drvdata,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
					 fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);

/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device_by_devt(class, devt);
	if (dev) {
		/*
		 * Drop the lookup reference taken by
		 * class_find_device_by_devt() first; device_unregister()
		 * then drops what should be the final reference.
		 */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);

/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * It is the responsibility of the caller to provide mutual
 * exclusion between two different calls of device_rename
 * on the same device to ensure that new_name is valid and
 * won't conflict with other devices.
 *
 * Note: Don't call this function. Currently, the networking layer calls this
 * function, but that will change. The following text from Kay Sievers offers
 * some insight:
 *
 * Renaming devices is racy at many levels, symlinks and other stuff are not
 * replaced atomically, and you get a "move" uevent, but it's not easy to
 * connect the event to the old and new device. Device nodes are not renamed at
 * all, there isn't even support for that in the kernel now.
 *
 * In the meantime, during renaming, your target name might be taken by another
 * driver, creating conflicts. Or the old name is taken directly after you
 * renamed it -- then you get events for the same DEVPATH, before you even see
 * the "move" event. It's just a mess, and nothing new should ever rely on
 * kernel device renaming. Besides that, it's not even implemented now for
 * other things than (driver-core wise very simple) network devices.
 *
 * We are currently about to change network renaming in udev to completely
 * disallow renaming of devices in the same namespace as the kernel uses,
 * because we can't solve the problems properly, that arise with swapping names
 * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
 * be allowed to some other name than eth[0-9]*, for the aforementioned
 * reasons.
 *
 * Make up a "real" name in the driver before you register anything, or add
 * some other attributes for userspace to find the device, or use udev to add
 * symlinks -- but never rename kernel devices later, it's a complete mess. We
 * don't even want to get into that and try to implement the missing pieces in
 * the core. We really have other pieces to fix in the driver core mess. :)
 */
int device_rename(struct device *dev, const char *new_name)
{
	struct kobject *kobj = &dev->kobj;
	char *old_device_name = NULL;
	int error;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	dev_dbg(dev, "renaming to %s\n", new_name);

	/* The old name is needed to rename the class-directory symlink. */
	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!old_device_name) {
		error = -ENOMEM;
		goto out;
	}

	if (dev->class) {
		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
					     kobj, old_device_name,
					     new_name, kobject_namespace(kobj));
		if (error)
			goto out;
	}

	error = kobject_rename(kobj, new_name);
	if (error)
		goto out;

out:
	put_device(dev);

	kfree(old_device_name);

	return error;
}
EXPORT_SYMBOL_GPL(device_rename);

/*
 * Maintain the "device" symlink of a class device across a reparent:
 * remove the link to @old_parent and create one to @new_parent.
 */
static int device_move_class_links(struct device *dev,
				   struct device *old_parent,
				   struct device *new_parent)
{
	int error = 0;

	if (old_parent)
		sysfs_remove_link(&dev->kobj, "device");
	if (new_parent)
		error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
					  "device");
	return error;
}

/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	/* May create/reuse a class glue directory under the new parent. */
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	/* sysfs move succeeded: now update the driver-model linkage. */
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			/* Roll back: symlinks, sysfs location, parent linkage. */
			device_move_class_links(dev, new_parent, old_parent);
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	/* Reorder the PM and /sys/devices lists as the caller requested. */
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);

/*
 * Change ownership of the sysfs attribute groups attached to @dev: the
 * class dev_groups, the device-type groups, the device's own groups and,
 * when applicable, the "online" attribute.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		/*
		 * Change the device groups of the device class for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	if (type) {
		/*
		 * Change the device groups of the device type for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	/* Change the device groups of @dev to @kuid/@kgid.
	 */
	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (error)
		return error;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* Change online device attributes of @dev to @kuid/@kgid. */
		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
						kuid, kgid);
		if (error)
			return error;
	}

	return 0;
}

/**
 * device_change_owner - change the owner of an existing device.
 * @dev: device.
 * @kuid: new owner's kuid
 * @kgid: new owner's kgid
 *
 * This changes the owner of @dev and its corresponding sysfs entries to
 * @kuid/@kgid. This function closely mirrors how @dev was added via driver
 * core.
 *
 * Returns 0 on success or error code on failure.
 */
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
	int error;
	struct kobject *kobj = &dev->kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	/*
	 * Change the kobject and the default attributes and groups of the
	 * ktype associated with it to @kuid/@kgid.
	 */
	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		goto out;

	/*
	 * Change the uevent file for @dev to the new owner. The uevent file
	 * was created in a separate step when @dev got added and we mirror
	 * that step here.
	 */
	error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
					kgid);
	if (error)
		goto out;

	/*
	 * Change the device groups, the device groups associated with the
	 * device class, and the groups associated with the device type of @dev
	 * to @kuid/@kgid.
	 */
	error = device_attrs_change_owner(dev, kuid, kgid);
	if (error)
		goto out;

	error = dpm_sysfs_change_owner(dev, kuid, kgid);
	if (error)
		goto out;

#ifdef CONFIG_BLOCK
	/* Deprecated-sysfs block devices have no class symlink to chown. */
	if (sysfs_deprecated && dev->class == &block_class)
		goto out;
#endif

	/*
	 * Change the owner of the symlink located in the class directory of
	 * the device class associated with @dev which points to the actual
	 * directory entry for @dev to @kuid/@kgid. This ensures that the
	 * symlink shows the same permissions as its target.
	 */
	error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
					dev_name(dev), kuid, kgid);
	if (error)
		goto out;

out:
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_change_owner);

/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		/* Bus shutdown takes precedence over the driver's own hook. */
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		/* Re-take the list lock before examining the next entry. */
		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}

/*
 * Device logging functions
 */

#ifdef CONFIG_PRINTK
/*
 * Fill @dev_info with the SUBSYSTEM= and DEVICE= metadata that printk
 * records attach to structured log messages for @dev.
 */
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
	const char *subsys;

	memset(dev_info, 0, sizeof(*dev_info));

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return;

	strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8         block dev_t
	 *   c127:3        char dev_t
	 *   n8            netdev ifindex
	 *   +sound:card0  subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "n%u", net->ifindex);
	} else {
		snprintf(dev_info->device, sizeof(dev_info->device),
			 "+%s:%s", subsys, dev_name(dev));
	}
}

/* Emit a log record for @dev with device metadata attached. */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info dev_info;

	set_dev_info(dev, &dev_info);

	return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);

/* Varargs convenience wrapper around dev_vprintk_emit(). */
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);

	r = dev_vprintk_emit(level, dev, fmt, args);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(dev_printk_emit);

/* Common worker: prefix the message with driver and device name. */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (dev)
		/* level is "KERN_SOH<digit>"; level[1] is the loglevel digit. */
		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
				dev_driver_string(dev), dev_name(dev), vaf);
	else
		printk("%s(NULL device *): %pV", level, vaf);
}

/**
 * dev_printk - print a message for a device at the given log level
 * @level: kernel log level (KERN_* string)
 * @dev: device the message concerns (may be NULL)
 * @fmt: printf-style format string
 */
void dev_printk(const char *level, const struct device *dev,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	__dev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(dev_printk);

/* Generate the fixed-level helpers (_dev_err(), _dev_warn(), ...). */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);

#endif

/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string
 * @...: arguments as specified in the format string
 *
 * This helper implements common pattern present in probe functions for error
 * checking: print debug or error message depending if the error value is
 * -EPROBE_DEFER and propagate error upwards.
 * In case of -EPROBE_DEFER it sets also defer probe reason, which can be
 * checked later by reading devices_deferred debugfs attribute.
 * It replaces code sequence::
 *
 * 	if (err != -EPROBE_DEFER)
 * 		dev_err(dev, ...);
 * 	else
 * 		dev_dbg(dev, ...);
 * 	return err;
 *
 * with::
 *
 * 	return dev_err_probe(dev, err, ...);
 *
 * Returns @err.
 *
 */
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (err != -EPROBE_DEFER) {
		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	} else {
		/* Record why probing deferred; shown in devices_deferred. */
		device_set_deferred_probe_reason(dev, &vaf);
		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	}

	va_end(args);

	return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);

/*
 * A fwnode is "primary" when it carries a secondary link (the link may be
 * ERR_PTR(-ENODEV) when no secondary is attached yet).
 */
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
	return fwnode && !IS_ERR(fwnode->secondary);
}

/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it.
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* Carry any existing secondary over to the new primary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			/* Clearing the primary keeps the secondary in place. */
			dev->fwnode = fn->secondary;
			/*
			 * A fwnode shared with the parent must stay intact —
			 * presumably the parent still links to its secondary;
			 * confirm against the fwnode lifecycle rules.
			 */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = ERR_PTR(-ENODEV);
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);

/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
 * @fwnode.
4297 */ 4298 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 4299 { 4300 if (fwnode) 4301 fwnode->secondary = ERR_PTR(-ENODEV); 4302 4303 if (fwnode_is_primary(dev->fwnode)) 4304 dev->fwnode->secondary = fwnode; 4305 else 4306 dev->fwnode = fwnode; 4307 } 4308 EXPORT_SYMBOL_GPL(set_secondary_fwnode); 4309 4310 /** 4311 * device_set_of_node_from_dev - reuse device-tree node of another device 4312 * @dev: device whose device-tree node is being set 4313 * @dev2: device whose device-tree node is being reused 4314 * 4315 * Takes another reference to the new device-tree node after first dropping 4316 * any reference held to the old node. 4317 */ 4318 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) 4319 { 4320 of_node_put(dev->of_node); 4321 dev->of_node = of_node_get(dev2->of_node); 4322 dev->of_node_reused = true; 4323 } 4324 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); 4325 4326 int device_match_name(struct device *dev, const void *name) 4327 { 4328 return sysfs_streq(dev_name(dev), name); 4329 } 4330 EXPORT_SYMBOL_GPL(device_match_name); 4331 4332 int device_match_of_node(struct device *dev, const void *np) 4333 { 4334 return dev->of_node == np; 4335 } 4336 EXPORT_SYMBOL_GPL(device_match_of_node); 4337 4338 int device_match_fwnode(struct device *dev, const void *fwnode) 4339 { 4340 return dev_fwnode(dev) == fwnode; 4341 } 4342 EXPORT_SYMBOL_GPL(device_match_fwnode); 4343 4344 int device_match_devt(struct device *dev, const void *pdevt) 4345 { 4346 return dev->devt == *(dev_t *)pdevt; 4347 } 4348 EXPORT_SYMBOL_GPL(device_match_devt); 4349 4350 int device_match_acpi_dev(struct device *dev, const void *adev) 4351 { 4352 return ACPI_COMPANION(dev) == adev; 4353 } 4354 EXPORT_SYMBOL(device_match_acpi_dev); 4355 4356 int device_match_any(struct device *dev, const void *unused) 4357 { 4358 return 1; 4359 } 4360 EXPORT_SYMBOL_GPL(device_match_any); 4361