// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */

#include "base.h"
#include "physical_location.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Parse the "sysfs.deprecated=<0|1>" kernel command line option. */
static int __init sysfs_deprecated_setup(char *arg)
{
	return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support. */

/* Devices whose sync_state() call is being deferred. */
static LIST_HEAD(deferred_sync);
/*
 * Non-zero while sync_state() callbacks are deferred; starts at 1 and is
 * balanced by the pause/resume pair below.
 */
static unsigned int defer_sync_state_count = 1;
/* Protects the fwnode suppliers/consumers link lists. */
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;

/**
 * fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 *
 * Create a fwnode link between fwnode handles @con and @sup.
 * The fwnode link
 * represents the detail that the firmware lists @sup fwnode as supplying a
 * resource to @con.
 *
 * The driver core will use the fwnode link to create a device link between the
 * two device objects corresponding to @con and @sup when they are created. The
 * driver core will automatically delete the fwnode link between @con and @sup
 * after doing that.
 *
 * Attempts to create duplicate links between the same pair of fwnode handles
 * are ignored and there is no reference counting.
 *
 * Return: 0 on success (including when the link already exists), -ENOMEM if
 * a new link could not be allocated.
 */
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
	struct fwnode_link *link;
	int ret = 0;

	mutex_lock(&fwnode_link_lock);

	/* Duplicate links are silently ignored (no reference counting). */
	list_for_each_entry(link, &sup->consumers, s_hook)
		if (link->consumer == con)
			goto out;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}

	link->supplier = sup;
	INIT_LIST_HEAD(&link->s_hook);
	link->consumer = con;
	INIT_LIST_HEAD(&link->c_hook);

	list_add(&link->s_hook, &sup->consumers);
	list_add(&link->c_hook, &con->suppliers);
	pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
		 con, sup);
out:
	mutex_unlock(&fwnode_link_lock);

	return ret;
}

/**
 * __fwnode_link_del - Delete a link between two fwnode_handles.
 * @link: the fwnode_link to be deleted
 *
 * The fwnode_link_lock needs to be held when this function is called.
 */
static void __fwnode_link_del(struct fwnode_link *link)
{
	pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
		 link->consumer, link->supplier);
	list_del(&link->s_hook);
	list_del(&link->c_hook);
	kfree(link);
}

/**
 * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
 * @fwnode: fwnode whose supplier links need to be deleted
 *
 * Deletes all supplier links connecting directly to @fwnode.
 */
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
		__fwnode_link_del(link);
	mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
 * @fwnode: fwnode whose consumer links need to be deleted
 *
 * Deletes all consumer links connecting directly to @fwnode.
 */
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
	struct fwnode_link *link, *tmp;

	mutex_lock(&fwnode_link_lock);
	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
		__fwnode_link_del(link);
	mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links needs to be deleted
 *
 * Deletes all links connecting directly to a fwnode.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
	fwnode_links_purge_suppliers(fwnode);
	fwnode_links_purge_consumers(fwnode);
}

/*
 * Recursively mark fwnodes that never got a struct device as "not a device"
 * and drop their consumer links, so consumers are not blocked on them.
 */
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *child;

	/* Don't purge consumer links of an added child */
	if (fwnode->dev)
		return;

	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
	fwnode_links_purge_consumers(fwnode);

	fwnode_for_each_available_child_node(fwnode, child)
		fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);

#ifdef CONFIG_SRCU
/*
 * Device links: SRCU protects the read side, a mutex serializes writers,
 * so readers can walk the link lists without blocking link creation.
 */
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
	mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
	return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
	srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
	return srcu_read_lock_held(&device_links_srcu);
}

static void device_link_synchronize_removal(void)
{
	synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
	list_del_rcu(&link->s_node);
	list_del_rcu(&link->c_node);
}
#else /* !CONFIG_SRCU */
/* Without SRCU, both sides fall back to a plain rwsem. */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
	down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
	up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
	down_read(&device_links_lock);
	return 0;
}

void device_links_read_unlock(int not_used) 242 { 243 up_read(&device_links_lock); 244 } 245 246 #ifdef CONFIG_DEBUG_LOCK_ALLOC 247 int device_links_read_lock_held(void) 248 { 249 return lockdep_is_held(&device_links_lock); 250 } 251 #endif 252 253 static inline void device_link_synchronize_removal(void) 254 { 255 } 256 257 static void device_link_remove_from_lists(struct device_link *link) 258 { 259 list_del(&link->s_node); 260 list_del(&link->c_node); 261 } 262 #endif /* !CONFIG_SRCU */ 263 264 static bool device_is_ancestor(struct device *dev, struct device *target) 265 { 266 while (target->parent) { 267 target = target->parent; 268 if (dev == target) 269 return true; 270 } 271 return false; 272 } 273 274 /** 275 * device_is_dependent - Check if one device depends on another one 276 * @dev: Device to check dependencies for. 277 * @target: Device to check against. 278 * 279 * Check if @target depends on @dev or any device dependent on it (its child or 280 * its consumer etc). Return 1 if that is the case or 0 otherwise. 281 */ 282 int device_is_dependent(struct device *dev, void *target) 283 { 284 struct device_link *link; 285 int ret; 286 287 /* 288 * The "ancestors" check is needed to catch the case when the target 289 * device has not been completely initialized yet and it is still 290 * missing from the list of children of its parent device. 
	 */
	if (dev == target || device_is_ancestor(dev, target))
		return 1;

	ret = device_for_each_child(dev, target, device_is_dependent);
	if (ret)
		return ret;

	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* SYNC_STATE_ONLY managed links don't create real dependencies. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;

		if (link->consumer == target)
			return 1;

		ret = device_is_dependent(link->consumer, target);
		if (ret)
			break;
	}
	return ret;
}

/*
 * Derive the initial state of a managed link from the current driver-binding
 * status of its supplier and consumer end points.
 */
static void device_link_init_status(struct device_link *link,
				    struct device *consumer,
				    struct device *supplier)
{
	switch (supplier->links.status) {
	case DL_DEV_PROBING:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			/*
			 * A consumer driver can create a link to a supplier
			 * that has not completed its probing yet as long as it
			 * knows that the supplier is already functional (for
			 * example, it has just acquired some resources from the
			 * supplier).
			 */
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		default:
			link->status = DL_STATE_DORMANT;
			break;
		}
		break;
	case DL_DEV_DRIVER_BOUND:
		switch (consumer->links.status) {
		case DL_DEV_PROBING:
			link->status = DL_STATE_CONSUMER_PROBE;
			break;
		case DL_DEV_DRIVER_BOUND:
			link->status = DL_STATE_ACTIVE;
			break;
		default:
			link->status = DL_STATE_AVAILABLE;
			break;
		}
		break;
	case DL_DEV_UNBINDING:
		link->status = DL_STATE_SUPPLIER_UNBIND;
		break;
	default:
		link->status = DL_STATE_DORMANT;
		break;
	}
}

static int device_reorder_to_tail(struct device *dev, void *not_used)
{
	struct device_link *link;

	/*
	 * Devices that have not been registered yet will be put to the ends
	 * of the lists during the registration, so skip them here.
	 */
	if (device_is_registered(dev))
		devices_kset_move_last(dev);

	if (device_pm_initialized(dev))
		device_pm_move_last(dev);

	/* Recurse over children first, then over managed consumers. */
	device_for_each_child(dev, NULL, device_reorder_to_tail);
	list_for_each_entry(link, &dev->links.consumers, s_node) {
		/* SYNC_STATE_ONLY managed links don't affect device ordering. */
		if ((link->flags & ~DL_FLAG_INFERRED) ==
		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
			continue;
		device_reorder_to_tail(link->consumer, NULL);
	}

	return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
	int idx;

	idx = device_links_read_lock();
	device_pm_lock();
	device_reorder_to_tail(dev, NULL);
	device_pm_unlock();
	device_links_read_unlock(idx);
}

#define to_devlink(dev) container_of((dev), struct device_link, link_dev)

/* sysfs "status" attribute: human-readable state of the device link. */
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	const char *output;

	switch (to_devlink(dev)->status) {
	case DL_STATE_NONE:
		output = "not tracked";
		break;
	case DL_STATE_DORMANT:
		output = "dormant";
		break;
	case DL_STATE_AVAILABLE:
		output = "available";
		break;
	case DL_STATE_CONSUMER_PROBE:
		output = "consumer probing";
		break;
	case DL_STATE_ACTIVE:
		output = "active";
		break;
	case DL_STATE_SUPPLIER_UNBIND:
		output = "supplier unbinding";
		break;
	default:
		output = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);

/* sysfs "auto_remove_on" attribute: when the link is dropped automatically. */
static ssize_t auto_remove_on_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct
device_link *link = to_devlink(dev);
	const char *output;

	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		output = "supplier unbind";
	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
		output = "consumer unbind";
	else
		output = "never";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);

/* sysfs "runtime_pm" attribute: 1 if runtime PM takes this link into account. */
static ssize_t runtime_pm_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);

/* sysfs "sync_state_only" attribute: 1 for SYNC_STATE_ONLY links. */
static ssize_t sync_state_only_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct device_link *link = to_devlink(dev);

	return sysfs_emit(buf, "%d\n",
			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);

static struct attribute *devlink_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_auto_remove_on.attr,
	&dev_attr_runtime_pm.attr,
	&dev_attr_sync_state_only.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devlink);

/* Final teardown of a device link; runs from the "long" system workqueue. */
static void device_link_release_fn(struct work_struct *work)
{
	struct device_link *link = container_of(work, struct device_link, rm_work);

	/* Ensure that all references to the link object have been dropped.
 */
	device_link_synchronize_removal();

	pm_runtime_release_supplier(link, true);

	put_device(link->consumer);
	put_device(link->supplier);
	kfree(link);
}

static void devlink_dev_release(struct device *dev)
{
	struct device_link *link = to_devlink(dev);

	INIT_WORK(&link->rm_work, device_link_release_fn);
	/*
	 * It may take a while to complete this work because of the SRCU
	 * synchronization in device_link_release_fn() and if the consumer or
	 * supplier devices get deleted when it runs, so put it into the "long"
	 * workqueue.
	 */
	queue_work(system_long_wq, &link->rm_work);
}

static struct class devlink_class = {
	.name = "devlink",
	.owner = THIS_MODULE,
	.dev_groups = devlink_groups,
	.dev_release = devlink_dev_release,
};

/*
 * Create the sysfs symlinks for a newly registered device link:
 * "supplier"/"consumer" inside the link's directory, plus back-links named
 * "consumer:<bus>:<dev>" / "supplier:<bus>:<dev>" in the end-point devices.
 */
static int devlink_add_symlinks(struct device *dev,
				struct class_interface *class_intf)
{
	int ret;
	size_t len;
	struct device_link *link = to_devlink(dev);
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	/* Size the buffer for the longer of the two "<prefix>:<bus>:<dev>" names. */
	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
	if (ret)
		goto out;

	ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
	if (ret)
		goto err_con;

	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_con_dev;

	snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
	ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
	if (ret)
		goto err_sup_dev;

	goto out;

err_sup_dev:
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
	sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
	sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
	kfree(buf);
	return ret;
}

/* Tear down the symlinks created by devlink_add_symlinks(). */
static void devlink_remove_symlinks(struct device *dev,
				    struct class_interface *class_intf)
{
	struct device_link *link = to_devlink(dev);
	size_t len;
	struct device *sup = link->supplier;
	struct device *con = link->consumer;
	char *buf;

	sysfs_remove_link(&link->link_dev.kobj, "consumer");
	sysfs_remove_link(&link->link_dev.kobj, "supplier");

	len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
		  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
	len += strlen(":");
	len += strlen("supplier:") + 1;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		WARN(1, "Unable to properly free device link symlinks!\n");
		return;
	}

	/* The consumer's directory is already gone if it was unregistered. */
	if (device_is_registered(con)) {
		snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
		sysfs_remove_link(&con->kobj, buf);
	}
	snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
	sysfs_remove_link(&sup->kobj, buf);
	kfree(buf);
}

static struct class_interface devlink_class_intf = {
	.class = &devlink_class,
	.add_dev = devlink_add_symlinks,
	.remove_dev = devlink_remove_symlinks,
};

static int __init devlink_class_init(void)
{
	int ret;

	ret = class_register(&devlink_class);
	if (ret)
		return ret;

	ret = class_interface_register(&devlink_class_intf);
	if (ret)
		class_unregister(&devlink_class);

	return ret;
}
postcore_initcall(devlink_class_init);

/* Flags that are only valid on managed (non-stateless) device links. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
			       DL_FLAG_AUTOPROBE_CONSUMER | \
			       DL_FLAG_SYNC_STATE_ONLY | \
			       DL_FLAG_INFERRED)

/* All flags that callers may pass to device_link_add(). */
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active meta state and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then). The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
				    struct device *supplier, u32 flags)
{
	struct device_link *link;

	/* Reject invalid and mutually exclusive flag combinations upfront. */
	if (!consumer || !supplier || consumer == supplier ||
	    flags & ~DL_ADD_VALID_FLAGS ||
	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
		return NULL;

	/* Resume and reference-count the supplier if RPM_ACTIVE is requested. */
	if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
		if (pm_runtime_get_sync(supplier) < 0) {
			pm_runtime_put_noidle(supplier);
			return NULL;
		}
	}

	if (!(flags & DL_FLAG_STATELESS))
		flags |= DL_FLAG_MANAGED;

	device_links_write_lock();
	device_pm_lock();

	/*
	 * If the supplier has not been fully registered yet or there is a
	 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
	 * the supplier already in the graph, return NULL. If the link is a
	 * SYNC_STATE_ONLY link, we don't check for reverse dependencies
	 * because it only affects sync_state() callbacks.
	 */
	if (!device_pm_initialized(supplier)
	    || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
		device_is_dependent(consumer, supplier))) {
		link = NULL;
		goto out;
	}

	/*
	 * SYNC_STATE_ONLY links are useless once a consumer device has probed.
	 * So, only create it if the consumer hasn't probed yet.
	 */
	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
	    consumer->links.status != DL_DEV_NO_DRIVER &&
	    consumer->links.status != DL_DEV_PROBING) {
		link = NULL;
		goto out;
	}

	/*
	 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
	 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
	 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
	 */
	if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
		flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

	/* Reuse an existing link between this pair, upgrading its flags. */
	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer != consumer)
			continue;

		/* An explicit request overrides a previously inferred link. */
		if (link->flags & DL_FLAG_INFERRED &&
		    !(flags & DL_FLAG_INFERRED))
			link->flags &= ~DL_FLAG_INFERRED;

		if (flags & DL_FLAG_PM_RUNTIME) {
			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
				pm_runtime_new_link(consumer);
				link->flags |= DL_FLAG_PM_RUNTIME;
			}
			if (flags & DL_FLAG_RPM_ACTIVE)
				refcount_inc(&link->rpm_active);
		}

		if (flags & DL_FLAG_STATELESS) {
			kref_get(&link->kref);
			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
			    !(link->flags & DL_FLAG_STATELESS)) {
				link->flags |= DL_FLAG_STATELESS;
				goto reorder;
			} else {
				link->flags |= DL_FLAG_STATELESS;
				goto out;
			}
		}

		/*
		 * If the life time of the link following from the new flags is
		 * longer than indicated by the flags of the existing link,
		 * update the existing link to stay around longer.
		 */
		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
			}
		} else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
					 DL_FLAG_AUTOREMOVE_SUPPLIER);
		}
		if (!(link->flags & DL_FLAG_MANAGED)) {
			kref_get(&link->kref);
			link->flags |= DL_FLAG_MANAGED;
			device_link_init_status(link, consumer, supplier);
		}
		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
			goto reorder;
		}

		goto out;
	}

	/* No existing link: allocate and register a new one. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		goto out;

	refcount_set(&link->rpm_active, 1);

	get_device(supplier);
	link->supplier = supplier;
	INIT_LIST_HEAD(&link->s_node);
	get_device(consumer);
	link->consumer = consumer;
	INIT_LIST_HEAD(&link->c_node);
	link->flags = flags;
	kref_init(&link->kref);

	link->link_dev.class = &devlink_class;
	device_set_pm_not_required(&link->link_dev);
	dev_set_name(&link->link_dev, "%s:%s--%s:%s",
		     dev_bus_name(supplier), dev_name(supplier),
		     dev_bus_name(consumer), dev_name(consumer));
	if (device_register(&link->link_dev)) {
		put_device(&link->link_dev);
		link = NULL;
		goto out;
	}

	if (flags & DL_FLAG_PM_RUNTIME) {
		if (flags & DL_FLAG_RPM_ACTIVE)
			refcount_inc(&link->rpm_active);

		pm_runtime_new_link(consumer);
	}

	/* Determine the initial link state. */
	if (flags & DL_FLAG_STATELESS)
		link->status = DL_STATE_NONE;
	else
		device_link_init_status(link, consumer, supplier);

	/*
	 * Some callers expect the link creation during consumer driver probe to
	 * resume the supplier even without DL_FLAG_RPM_ACTIVE.
	 */
	if (link->status == DL_STATE_CONSUMER_PROBE &&
	    flags & DL_FLAG_PM_RUNTIME)
		pm_runtime_resume(supplier);

	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

	if (flags & DL_FLAG_SYNC_STATE_ONLY) {
		dev_dbg(consumer,
			"Linked as a sync state only consumer to %s\n",
			dev_name(supplier));
		goto out;
	}

reorder:
	/*
	 * Move the consumer and all of the devices depending on it to the end
	 * of dpm_list and the devices_kset list.
	 *
	 * It is necessary to hold dpm_list locked throughout all that or else
	 * we may end up suspending with a wrong ordering of it.
	 */
	device_reorder_to_tail(consumer, NULL);

	dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
	device_pm_unlock();
	device_links_write_unlock();

	/* Drop the extra RPM reference taken above if no link was created. */
	if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
		pm_runtime_put(supplier);

	return link;
}
EXPORT_SYMBOL_GPL(device_link_add);

/* kref release callback: unlink and unregister the link device. */
static void __device_link_del(struct kref *kref)
{
	struct device_link *link = container_of(kref, struct device_link, kref);

	dev_dbg(link->consumer, "Dropping the link to %s\n",
		dev_name(link->supplier));

	pm_runtime_drop_link(link);

	device_link_remove_from_lists(link);
	device_unregister(&link->link_dev);
}

static void device_link_put_kref(struct device_link *link)
{
	if (link->flags & DL_FLAG_STATELESS)
		kref_put(&link->kref, __device_link_del);
	else if (!device_is_registered(link->consumer))
		__device_link_del(&link->kref);
	else
		WARN(1, "Unable to drop a managed device link reference\n");
}

/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 * If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
	device_links_write_lock();
	device_link_put_kref(link);
	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
	struct device_link *link;

	if (WARN_ON(consumer == supplier))
		return;

	device_links_write_lock();

	list_for_each_entry(link, &supplier->links.consumers, s_node) {
		if (link->consumer == consumer) {
			device_link_put_kref(link);
			break;
		}
	}

	device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);

/*
 * Downgrade this consumer's CONSUMER_PROBE supplier links after its probe
 * did not complete: back to AVAILABLE if the supplier is bound, otherwise
 * to DORMANT (only expected for SYNC_STATE_ONLY links).
 */
static void device_links_missing_supplier(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (link->status != DL_STATE_CONSUMER_PROBE)
			continue;

		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
		} else {
			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
			WRITE_ONCE(link->status, DL_STATE_DORMANT);
		}
	}
}

static bool dev_is_best_effort(struct device *dev)
{
	return (fw_devlink_best_effort && dev->can_match) ||
		(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers.
 * Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers. This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
	struct device_link *link;
	int ret = 0, fwnode_ret = 0;
	struct fwnode_handle *sup_fw;

	/*
	 * Device waiting for supplier to become available is not allowed to
	 * probe.
	 */
	mutex_lock(&fwnode_link_lock);
	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
	    !fw_devlink_is_permissive()) {
		sup_fw = list_first_entry(&dev->fwnode->suppliers,
					  struct fwnode_link,
					  c_hook)->supplier;
		if (!dev_is_best_effort(dev)) {
			fwnode_ret = -EPROBE_DEFER;
			dev_err_probe(dev, -EPROBE_DEFER,
				      "wait for supplier %pfwP\n", sup_fw);
		} else {
			fwnode_ret = -EAGAIN;
		}
	}
	mutex_unlock(&fwnode_link_lock);
	if (fwnode_ret == -EPROBE_DEFER)
		return fwnode_ret;

	device_links_write_lock();

	list_for_each_entry(link, &dev->links.suppliers, c_node) {
		if (!(link->flags & DL_FLAG_MANAGED))
			continue;

		if (link->status != DL_STATE_AVAILABLE &&
		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {

			/* Best-effort consumers may probe without the supplier. */
			if (dev_is_best_effort(dev) &&
			    link->flags & DL_FLAG_INFERRED &&
			    !link->supplier->can_match) {
				ret = -EAGAIN;
				continue;
			}

			device_links_missing_supplier(dev);
			dev_err_probe(dev, -EPROBE_DEFER,
				      "supplier %s not ready\n",
				      dev_name(link->supplier));
			ret =
-EPROBE_DEFER; 1040 break; 1041 } 1042 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); 1043 } 1044 dev->links.status = DL_DEV_PROBING; 1045 1046 device_links_write_unlock(); 1047 1048 return ret ? ret : fwnode_ret; 1049 } 1050 1051 /** 1052 * __device_links_queue_sync_state - Queue a device for sync_state() callback 1053 * @dev: Device to call sync_state() on 1054 * @list: List head to queue the @dev on 1055 * 1056 * Queues a device for a sync_state() callback when the device links write lock 1057 * isn't held. This allows the sync_state() execution flow to use device links 1058 * APIs. The caller must ensure this function is called with 1059 * device_links_write_lock() held. 1060 * 1061 * This function does a get_device() to make sure the device is not freed while 1062 * on this list. 1063 * 1064 * So the caller must also ensure that device_links_flush_sync_list() is called 1065 * as soon as the caller releases device_links_write_lock(). This is necessary 1066 * to make sure the sync_state() is called in a timely fashion and the 1067 * put_device() is called on this device. 1068 */ 1069 static void __device_links_queue_sync_state(struct device *dev, 1070 struct list_head *list) 1071 { 1072 struct device_link *link; 1073 1074 if (!dev_has_sync_state(dev)) 1075 return; 1076 if (dev->state_synced) 1077 return; 1078 1079 list_for_each_entry(link, &dev->links.consumers, s_node) { 1080 if (!(link->flags & DL_FLAG_MANAGED)) 1081 continue; 1082 if (link->status != DL_STATE_ACTIVE) 1083 return; 1084 } 1085 1086 /* 1087 * Set the flag here to avoid adding the same device to a list more 1088 * than once. This can happen if new consumers get added to the device 1089 * and probed before the list is flushed. 
1090 */ 1091 dev->state_synced = true; 1092 1093 if (WARN_ON(!list_empty(&dev->links.defer_sync))) 1094 return; 1095 1096 get_device(dev); 1097 list_add_tail(&dev->links.defer_sync, list); 1098 } 1099 1100 /** 1101 * device_links_flush_sync_list - Call sync_state() on a list of devices 1102 * @list: List of devices to call sync_state() on 1103 * @dont_lock_dev: Device for which lock is already held by the caller 1104 * 1105 * Calls sync_state() on all the devices that have been queued for it. This 1106 * function is used in conjunction with __device_links_queue_sync_state(). The 1107 * @dont_lock_dev parameter is useful when this function is called from a 1108 * context where a device lock is already held. 1109 */ 1110 static void device_links_flush_sync_list(struct list_head *list, 1111 struct device *dont_lock_dev) 1112 { 1113 struct device *dev, *tmp; 1114 1115 list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { 1116 list_del_init(&dev->links.defer_sync); 1117 1118 if (dev != dont_lock_dev) 1119 device_lock(dev); 1120 1121 if (dev->bus->sync_state) 1122 dev->bus->sync_state(dev); 1123 else if (dev->driver && dev->driver->sync_state) 1124 dev->driver->sync_state(dev); 1125 1126 if (dev != dont_lock_dev) 1127 device_unlock(dev); 1128 1129 put_device(dev); 1130 } 1131 } 1132 1133 void device_links_supplier_sync_state_pause(void) 1134 { 1135 device_links_write_lock(); 1136 defer_sync_state_count++; 1137 device_links_write_unlock(); 1138 } 1139 1140 void device_links_supplier_sync_state_resume(void) 1141 { 1142 struct device *dev, *tmp; 1143 LIST_HEAD(sync_list); 1144 1145 device_links_write_lock(); 1146 if (!defer_sync_state_count) { 1147 WARN(true, "Unmatched sync_state pause/resume!"); 1148 goto out; 1149 } 1150 defer_sync_state_count--; 1151 if (defer_sync_state_count) 1152 goto out; 1153 1154 list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { 1155 /* 1156 * Delete from deferred_sync list before queuing it to 1157 * sync_list because 
defer_sync is used for both lists. 1158 */ 1159 list_del_init(&dev->links.defer_sync); 1160 __device_links_queue_sync_state(dev, &sync_list); 1161 } 1162 out: 1163 device_links_write_unlock(); 1164 1165 device_links_flush_sync_list(&sync_list, NULL); 1166 } 1167 1168 static int sync_state_resume_initcall(void) 1169 { 1170 device_links_supplier_sync_state_resume(); 1171 return 0; 1172 } 1173 late_initcall(sync_state_resume_initcall); 1174 1175 static void __device_links_supplier_defer_sync(struct device *sup) 1176 { 1177 if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) 1178 list_add_tail(&sup->links.defer_sync, &deferred_sync); 1179 } 1180 1181 static void device_link_drop_managed(struct device_link *link) 1182 { 1183 link->flags &= ~DL_FLAG_MANAGED; 1184 WRITE_ONCE(link->status, DL_STATE_NONE); 1185 kref_put(&link->kref, __device_link_del); 1186 } 1187 1188 static ssize_t waiting_for_supplier_show(struct device *dev, 1189 struct device_attribute *attr, 1190 char *buf) 1191 { 1192 bool val; 1193 1194 device_lock(dev); 1195 val = !list_empty(&dev->fwnode->suppliers); 1196 device_unlock(dev); 1197 return sysfs_emit(buf, "%u\n", val); 1198 } 1199 static DEVICE_ATTR_RO(waiting_for_supplier); 1200 1201 /** 1202 * device_links_force_bind - Prepares device to be force bound 1203 * @dev: Consumer device. 1204 * 1205 * device_bind_driver() force binds a device to a driver without calling any 1206 * driver probe functions. So the consumer really isn't going to wait for any 1207 * supplier before it's bound to the driver. We still want the device link 1208 * states to be sensible when this happens. 1209 * 1210 * In preparation for device_bind_driver(), this function goes through each 1211 * supplier device links and checks if the supplier is bound. If it is, then 1212 * the device link status is set to CONSUMER_PROBE. Otherwise, the device link 1213 * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored. 
1214 */ 1215 void device_links_force_bind(struct device *dev) 1216 { 1217 struct device_link *link, *ln; 1218 1219 device_links_write_lock(); 1220 1221 list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { 1222 if (!(link->flags & DL_FLAG_MANAGED)) 1223 continue; 1224 1225 if (link->status != DL_STATE_AVAILABLE) { 1226 device_link_drop_managed(link); 1227 continue; 1228 } 1229 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); 1230 } 1231 dev->links.status = DL_DEV_PROBING; 1232 1233 device_links_write_unlock(); 1234 } 1235 1236 /** 1237 * device_links_driver_bound - Update device links after probing its driver. 1238 * @dev: Device to update the links for. 1239 * 1240 * The probe has been successful, so update links from this device to any 1241 * consumers by changing their status to "available". 1242 * 1243 * Also change the status of @dev's links to suppliers to "active". 1244 * 1245 * Links without the DL_FLAG_MANAGED flag set are ignored. 1246 */ 1247 void device_links_driver_bound(struct device *dev) 1248 { 1249 struct device_link *link, *ln; 1250 LIST_HEAD(sync_list); 1251 1252 /* 1253 * If a device binds successfully, it's expected to have created all 1254 * the device links it needs to or make new device links as it needs 1255 * them. So, fw_devlink no longer needs to create device links to any 1256 * of the device's suppliers. 1257 * 1258 * Also, if a child firmware node of this bound device is not added as 1259 * a device by now, assume it is never going to be added and make sure 1260 * other devices don't defer probe indefinitely by waiting for such a 1261 * child device. 
1262 */ 1263 if (dev->fwnode && dev->fwnode->dev == dev) { 1264 struct fwnode_handle *child; 1265 fwnode_links_purge_suppliers(dev->fwnode); 1266 fwnode_for_each_available_child_node(dev->fwnode, child) 1267 fw_devlink_purge_absent_suppliers(child); 1268 } 1269 device_remove_file(dev, &dev_attr_waiting_for_supplier); 1270 1271 device_links_write_lock(); 1272 1273 list_for_each_entry(link, &dev->links.consumers, s_node) { 1274 if (!(link->flags & DL_FLAG_MANAGED)) 1275 continue; 1276 1277 /* 1278 * Links created during consumer probe may be in the "consumer 1279 * probe" state to start with if the supplier is still probing 1280 * when they are created and they may become "active" if the 1281 * consumer probe returns first. Skip them here. 1282 */ 1283 if (link->status == DL_STATE_CONSUMER_PROBE || 1284 link->status == DL_STATE_ACTIVE) 1285 continue; 1286 1287 WARN_ON(link->status != DL_STATE_DORMANT); 1288 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 1289 1290 if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) 1291 driver_deferred_probe_add(link->consumer); 1292 } 1293 1294 if (defer_sync_state_count) 1295 __device_links_supplier_defer_sync(dev); 1296 else 1297 __device_links_queue_sync_state(dev, &sync_list); 1298 1299 list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { 1300 struct device *supplier; 1301 1302 if (!(link->flags & DL_FLAG_MANAGED)) 1303 continue; 1304 1305 supplier = link->supplier; 1306 if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { 1307 /* 1308 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no 1309 * other DL_MANAGED_LINK_FLAGS have been set. So, it's 1310 * save to drop the managed link completely. 1311 */ 1312 device_link_drop_managed(link); 1313 } else if (dev_is_best_effort(dev) && 1314 link->flags & DL_FLAG_INFERRED && 1315 link->status != DL_STATE_CONSUMER_PROBE && 1316 !link->supplier->can_match) { 1317 /* 1318 * When dev_is_best_effort() is true, we ignore device 1319 * links to suppliers that don't have a driver. 
If the 1320 * consumer device still managed to probe, there's no 1321 * point in maintaining a device link in a weird state 1322 * (consumer probed before supplier). So delete it. 1323 */ 1324 device_link_drop_managed(link); 1325 } else { 1326 WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); 1327 WRITE_ONCE(link->status, DL_STATE_ACTIVE); 1328 } 1329 1330 /* 1331 * This needs to be done even for the deleted 1332 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last 1333 * device link that was preventing the supplier from getting a 1334 * sync_state() call. 1335 */ 1336 if (defer_sync_state_count) 1337 __device_links_supplier_defer_sync(supplier); 1338 else 1339 __device_links_queue_sync_state(supplier, &sync_list); 1340 } 1341 1342 dev->links.status = DL_DEV_DRIVER_BOUND; 1343 1344 device_links_write_unlock(); 1345 1346 device_links_flush_sync_list(&sync_list, dev); 1347 } 1348 1349 /** 1350 * __device_links_no_driver - Update links of a device without a driver. 1351 * @dev: Device without a drvier. 1352 * 1353 * Delete all non-persistent links from this device to any suppliers. 1354 * 1355 * Persistent links stay around, but their status is changed to "available", 1356 * unless they already are in the "supplier unbind in progress" state in which 1357 * case they need not be updated. 1358 * 1359 * Links without the DL_FLAG_MANAGED flag set are ignored. 
1360 */ 1361 static void __device_links_no_driver(struct device *dev) 1362 { 1363 struct device_link *link, *ln; 1364 1365 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 1366 if (!(link->flags & DL_FLAG_MANAGED)) 1367 continue; 1368 1369 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { 1370 device_link_drop_managed(link); 1371 continue; 1372 } 1373 1374 if (link->status != DL_STATE_CONSUMER_PROBE && 1375 link->status != DL_STATE_ACTIVE) 1376 continue; 1377 1378 if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { 1379 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 1380 } else { 1381 WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); 1382 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1383 } 1384 } 1385 1386 dev->links.status = DL_DEV_NO_DRIVER; 1387 } 1388 1389 /** 1390 * device_links_no_driver - Update links after failing driver probe. 1391 * @dev: Device whose driver has just failed to probe. 1392 * 1393 * Clean up leftover links to consumers for @dev and invoke 1394 * %__device_links_no_driver() to update links to suppliers for it as 1395 * appropriate. 1396 * 1397 * Links without the DL_FLAG_MANAGED flag set are ignored. 1398 */ 1399 void device_links_no_driver(struct device *dev) 1400 { 1401 struct device_link *link; 1402 1403 device_links_write_lock(); 1404 1405 list_for_each_entry(link, &dev->links.consumers, s_node) { 1406 if (!(link->flags & DL_FLAG_MANAGED)) 1407 continue; 1408 1409 /* 1410 * The probe has failed, so if the status of the link is 1411 * "consumer probe" or "active", it must have been added by 1412 * a probing consumer while this device was still probing. 1413 * Change its state to "dormant", as it represents a valid 1414 * relationship, but it is not functionally meaningful. 
1415 */ 1416 if (link->status == DL_STATE_CONSUMER_PROBE || 1417 link->status == DL_STATE_ACTIVE) 1418 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1419 } 1420 1421 __device_links_no_driver(dev); 1422 1423 device_links_write_unlock(); 1424 } 1425 1426 /** 1427 * device_links_driver_cleanup - Update links after driver removal. 1428 * @dev: Device whose driver has just gone away. 1429 * 1430 * Update links to consumers for @dev by changing their status to "dormant" and 1431 * invoke %__device_links_no_driver() to update links to suppliers for it as 1432 * appropriate. 1433 * 1434 * Links without the DL_FLAG_MANAGED flag set are ignored. 1435 */ 1436 void device_links_driver_cleanup(struct device *dev) 1437 { 1438 struct device_link *link, *ln; 1439 1440 device_links_write_lock(); 1441 1442 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { 1443 if (!(link->flags & DL_FLAG_MANAGED)) 1444 continue; 1445 1446 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); 1447 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); 1448 1449 /* 1450 * autoremove the links between this @dev and its consumer 1451 * devices that are not active, i.e. where the link state 1452 * has moved to DL_STATE_SUPPLIER_UNBIND. 1453 */ 1454 if (link->status == DL_STATE_SUPPLIER_UNBIND && 1455 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 1456 device_link_drop_managed(link); 1457 1458 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1459 } 1460 1461 list_del_init(&dev->links.defer_sync); 1462 __device_links_no_driver(dev); 1463 1464 device_links_write_unlock(); 1465 } 1466 1467 /** 1468 * device_links_busy - Check if there are any busy links to consumers. 1469 * @dev: Device to check. 1470 * 1471 * Check each consumer of the device and return 'true' if its link's status 1472 * is one of "consumer probe" or "active" (meaning that the given consumer is 1473 * probing right now or its driver is present). 
Otherwise, change the link 1474 * state to "supplier unbind" to prevent the consumer from being probed 1475 * successfully going forward. 1476 * 1477 * Return 'false' if there are no probing or active consumers. 1478 * 1479 * Links without the DL_FLAG_MANAGED flag set are ignored. 1480 */ 1481 bool device_links_busy(struct device *dev) 1482 { 1483 struct device_link *link; 1484 bool ret = false; 1485 1486 device_links_write_lock(); 1487 1488 list_for_each_entry(link, &dev->links.consumers, s_node) { 1489 if (!(link->flags & DL_FLAG_MANAGED)) 1490 continue; 1491 1492 if (link->status == DL_STATE_CONSUMER_PROBE 1493 || link->status == DL_STATE_ACTIVE) { 1494 ret = true; 1495 break; 1496 } 1497 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 1498 } 1499 1500 dev->links.status = DL_DEV_UNBINDING; 1501 1502 device_links_write_unlock(); 1503 return ret; 1504 } 1505 1506 /** 1507 * device_links_unbind_consumers - Force unbind consumers of the given device. 1508 * @dev: Device to unbind the consumers of. 1509 * 1510 * Walk the list of links to consumers for @dev and if any of them is in the 1511 * "consumer probe" state, wait for all device probes in progress to complete 1512 * and start over. 1513 * 1514 * If that's not the case, change the status of the link to "supplier unbind" 1515 * and check if the link was in the "active" state. If so, force the consumer 1516 * driver to unbind and start over (the consumer will not re-probe as we have 1517 * changed the state of the link already). 1518 * 1519 * Links without the DL_FLAG_MANAGED flag set are ignored. 
1520 */ 1521 void device_links_unbind_consumers(struct device *dev) 1522 { 1523 struct device_link *link; 1524 1525 start: 1526 device_links_write_lock(); 1527 1528 list_for_each_entry(link, &dev->links.consumers, s_node) { 1529 enum device_link_state status; 1530 1531 if (!(link->flags & DL_FLAG_MANAGED) || 1532 link->flags & DL_FLAG_SYNC_STATE_ONLY) 1533 continue; 1534 1535 status = link->status; 1536 if (status == DL_STATE_CONSUMER_PROBE) { 1537 device_links_write_unlock(); 1538 1539 wait_for_device_probe(); 1540 goto start; 1541 } 1542 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 1543 if (status == DL_STATE_ACTIVE) { 1544 struct device *consumer = link->consumer; 1545 1546 get_device(consumer); 1547 1548 device_links_write_unlock(); 1549 1550 device_release_driver_internal(consumer, NULL, 1551 consumer->parent); 1552 put_device(consumer); 1553 goto start; 1554 } 1555 } 1556 1557 device_links_write_unlock(); 1558 } 1559 1560 /** 1561 * device_links_purge - Delete existing links to other devices. 1562 * @dev: Target device. 1563 */ 1564 static void device_links_purge(struct device *dev) 1565 { 1566 struct device_link *link, *ln; 1567 1568 if (dev->class == &devlink_class) 1569 return; 1570 1571 /* 1572 * Delete all of the remaining links from this device to any other 1573 * devices (either consumers or suppliers). 
1574 */ 1575 device_links_write_lock(); 1576 1577 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 1578 WARN_ON(link->status == DL_STATE_ACTIVE); 1579 __device_link_del(&link->kref); 1580 } 1581 1582 list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { 1583 WARN_ON(link->status != DL_STATE_DORMANT && 1584 link->status != DL_STATE_NONE); 1585 __device_link_del(&link->kref); 1586 } 1587 1588 device_links_write_unlock(); 1589 } 1590 1591 #define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \ 1592 DL_FLAG_SYNC_STATE_ONLY) 1593 #define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \ 1594 DL_FLAG_AUTOPROBE_CONSUMER) 1595 #define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \ 1596 DL_FLAG_PM_RUNTIME) 1597 1598 static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON; 1599 static int __init fw_devlink_setup(char *arg) 1600 { 1601 if (!arg) 1602 return -EINVAL; 1603 1604 if (strcmp(arg, "off") == 0) { 1605 fw_devlink_flags = 0; 1606 } else if (strcmp(arg, "permissive") == 0) { 1607 fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE; 1608 } else if (strcmp(arg, "on") == 0) { 1609 fw_devlink_flags = FW_DEVLINK_FLAGS_ON; 1610 } else if (strcmp(arg, "rpm") == 0) { 1611 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM; 1612 } 1613 return 0; 1614 } 1615 early_param("fw_devlink", fw_devlink_setup); 1616 1617 static bool fw_devlink_strict = true; 1618 static int __init fw_devlink_strict_setup(char *arg) 1619 { 1620 return strtobool(arg, &fw_devlink_strict); 1621 } 1622 early_param("fw_devlink.strict", fw_devlink_strict_setup); 1623 1624 u32 fw_devlink_get_flags(void) 1625 { 1626 return fw_devlink_flags; 1627 } 1628 1629 static bool fw_devlink_is_permissive(void) 1630 { 1631 return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE; 1632 } 1633 1634 bool fw_devlink_is_strict(void) 1635 { 1636 return fw_devlink_strict && !fw_devlink_is_permissive(); 1637 } 1638 1639 static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode) 1640 { 1641 if (fwnode->flags 
& FWNODE_FLAG_LINKS_ADDED) 1642 return; 1643 1644 fwnode_call_int_op(fwnode, add_links); 1645 fwnode->flags |= FWNODE_FLAG_LINKS_ADDED; 1646 } 1647 1648 static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) 1649 { 1650 struct fwnode_handle *child = NULL; 1651 1652 fw_devlink_parse_fwnode(fwnode); 1653 1654 while ((child = fwnode_get_next_available_child_node(fwnode, child))) 1655 fw_devlink_parse_fwtree(child); 1656 } 1657 1658 static void fw_devlink_relax_link(struct device_link *link) 1659 { 1660 if (!(link->flags & DL_FLAG_INFERRED)) 1661 return; 1662 1663 if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE)) 1664 return; 1665 1666 pm_runtime_drop_link(link); 1667 link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE; 1668 dev_dbg(link->consumer, "Relaxing link with %s\n", 1669 dev_name(link->supplier)); 1670 } 1671 1672 static int fw_devlink_no_driver(struct device *dev, void *data) 1673 { 1674 struct device_link *link = to_devlink(dev); 1675 1676 if (!link->supplier->can_match) 1677 fw_devlink_relax_link(link); 1678 1679 return 0; 1680 } 1681 1682 void fw_devlink_drivers_done(void) 1683 { 1684 fw_devlink_drv_reg_done = true; 1685 device_links_write_lock(); 1686 class_for_each_device(&devlink_class, NULL, NULL, 1687 fw_devlink_no_driver); 1688 device_links_write_unlock(); 1689 } 1690 1691 /** 1692 * wait_for_init_devices_probe - Try to probe any device needed for init 1693 * 1694 * Some devices might need to be probed and bound successfully before the kernel 1695 * boot sequence can finish and move on to init/userspace. For example, a 1696 * network interface might need to be bound to be able to mount a NFS rootfs. 1697 * 1698 * With fw_devlink=on by default, some of these devices might be blocked from 1699 * probing because they are waiting on a optional supplier that doesn't have a 1700 * driver. 
While fw_devlink will eventually identify such devices and unblock 1701 * the probing automatically, it might be too late by the time it unblocks the 1702 * probing of devices. For example, the IP4 autoconfig might timeout before 1703 * fw_devlink unblocks probing of the network interface. 1704 * 1705 * This function is available to temporarily try and probe all devices that have 1706 * a driver even if some of their suppliers haven't been added or don't have 1707 * drivers. 1708 * 1709 * The drivers can then decide which of the suppliers are optional vs mandatory 1710 * and probe the device if possible. By the time this function returns, all such 1711 * "best effort" probes are guaranteed to be completed. If a device successfully 1712 * probes in this mode, we delete all fw_devlink discovered dependencies of that 1713 * device where the supplier hasn't yet probed successfully because they have to 1714 * be optional dependencies. 1715 * 1716 * Any devices that didn't successfully probe go back to being treated as if 1717 * this function was never called. 1718 * 1719 * This also means that some devices that aren't needed for init and could have 1720 * waited for their optional supplier to probe (when the supplier's module is 1721 * loaded later on) would end up probing prematurely with limited functionality. 1722 * So call this function only when boot would fail without it. 1723 */ 1724 void __init wait_for_init_devices_probe(void) 1725 { 1726 if (!fw_devlink_flags || fw_devlink_is_permissive()) 1727 return; 1728 1729 /* 1730 * Wait for all ongoing probes to finish so that the "best effort" is 1731 * only applied to devices that can't probe otherwise. 1732 */ 1733 wait_for_device_probe(); 1734 1735 pr_info("Trying to probe devices needed for running init ...\n"); 1736 fw_devlink_best_effort = true; 1737 driver_deferred_probe_trigger(); 1738 1739 /* 1740 * Wait for all "best effort" probes to finish before going back to 1741 * normal enforcement. 
1742 */ 1743 wait_for_device_probe(); 1744 fw_devlink_best_effort = false; 1745 } 1746 1747 static void fw_devlink_unblock_consumers(struct device *dev) 1748 { 1749 struct device_link *link; 1750 1751 if (!fw_devlink_flags || fw_devlink_is_permissive()) 1752 return; 1753 1754 device_links_write_lock(); 1755 list_for_each_entry(link, &dev->links.consumers, s_node) 1756 fw_devlink_relax_link(link); 1757 device_links_write_unlock(); 1758 } 1759 1760 /** 1761 * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links 1762 * @con: Device to check dependencies for. 1763 * @sup: Device to check against. 1764 * 1765 * Check if @sup depends on @con or any device dependent on it (its child or 1766 * its consumer etc). When such a cyclic dependency is found, convert all 1767 * device links created solely by fw_devlink into SYNC_STATE_ONLY device links. 1768 * This is the equivalent of doing fw_devlink=permissive just between the 1769 * devices in the cycle. We need to do this because, at this point, fw_devlink 1770 * can't tell which of these dependencies is not a real dependency. 1771 * 1772 * Return 1 if a cycle is found. Otherwise, return 0. 
1773 */ 1774 static int fw_devlink_relax_cycle(struct device *con, void *sup) 1775 { 1776 struct device_link *link; 1777 int ret; 1778 1779 if (con == sup) 1780 return 1; 1781 1782 ret = device_for_each_child(con, sup, fw_devlink_relax_cycle); 1783 if (ret) 1784 return ret; 1785 1786 list_for_each_entry(link, &con->links.consumers, s_node) { 1787 if ((link->flags & ~DL_FLAG_INFERRED) == 1788 (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) 1789 continue; 1790 1791 if (!fw_devlink_relax_cycle(link->consumer, sup)) 1792 continue; 1793 1794 ret = 1; 1795 1796 fw_devlink_relax_link(link); 1797 } 1798 return ret; 1799 } 1800 1801 /** 1802 * fw_devlink_create_devlink - Create a device link from a consumer to fwnode 1803 * @con: consumer device for the device link 1804 * @sup_handle: fwnode handle of supplier 1805 * @flags: devlink flags 1806 * 1807 * This function will try to create a device link between the consumer device 1808 * @con and the supplier device represented by @sup_handle. 1809 * 1810 * The supplier has to be provided as a fwnode because incorrect cycles in 1811 * fwnode links can sometimes cause the supplier device to never be created. 1812 * This function detects such cases and returns an error if it cannot create a 1813 * device link from the consumer to a missing supplier. 1814 * 1815 * Returns, 1816 * 0 on successfully creating a device link 1817 * -EINVAL if the device link cannot be created as expected 1818 * -EAGAIN if the device link cannot be created right now, but it may be 1819 * possible to do that in the future 1820 */ 1821 static int fw_devlink_create_devlink(struct device *con, 1822 struct fwnode_handle *sup_handle, u32 flags) 1823 { 1824 struct device *sup_dev; 1825 int ret = 0; 1826 1827 /* 1828 * In some cases, a device P might also be a supplier to its child node 1829 * C. However, this would defer the probe of C until the probe of P 1830 * completes successfully. This is perfectly fine in the device driver 1831 * model. 
device_add() doesn't guarantee probe completion of the device 1832 * by the time it returns. 1833 * 1834 * However, there are a few drivers that assume C will finish probing 1835 * as soon as it's added and before P finishes probing. So, we provide 1836 * a flag to let fw_devlink know not to delay the probe of C until the 1837 * probe of P completes successfully. 1838 * 1839 * When such a flag is set, we can't create device links where P is the 1840 * supplier of C as that would delay the probe of C. 1841 */ 1842 if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD && 1843 fwnode_is_ancestor_of(sup_handle, con->fwnode)) 1844 return -EINVAL; 1845 1846 sup_dev = get_dev_from_fwnode(sup_handle); 1847 if (sup_dev) { 1848 /* 1849 * If it's one of those drivers that don't actually bind to 1850 * their device using driver core, then don't wait on this 1851 * supplier device indefinitely. 1852 */ 1853 if (sup_dev->links.status == DL_DEV_NO_DRIVER && 1854 sup_handle->flags & FWNODE_FLAG_INITIALIZED) { 1855 ret = -EINVAL; 1856 goto out; 1857 } 1858 1859 /* 1860 * If this fails, it is due to cycles in device links. Just 1861 * give up on this link and treat it as invalid. 1862 */ 1863 if (!device_link_add(con, sup_dev, flags) && 1864 !(flags & DL_FLAG_SYNC_STATE_ONLY)) { 1865 dev_info(con, "Fixing up cyclic dependency with %s\n", 1866 dev_name(sup_dev)); 1867 device_links_write_lock(); 1868 fw_devlink_relax_cycle(con, sup_dev); 1869 device_links_write_unlock(); 1870 device_link_add(con, sup_dev, 1871 FW_DEVLINK_FLAGS_PERMISSIVE); 1872 ret = -EINVAL; 1873 } 1874 1875 goto out; 1876 } 1877 1878 /* Supplier that's already initialized without a struct device. */ 1879 if (sup_handle->flags & FWNODE_FLAG_INITIALIZED) 1880 return -EINVAL; 1881 1882 /* 1883 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports 1884 * cycles. So cycle detection isn't necessary and shouldn't be 1885 * done. 
1886 */ 1887 if (flags & DL_FLAG_SYNC_STATE_ONLY) 1888 return -EAGAIN; 1889 1890 /* 1891 * If we can't find the supplier device from its fwnode, it might be 1892 * due to a cyclic dependency between fwnodes. Some of these cycles can 1893 * be broken by applying logic. Check for these types of cycles and 1894 * break them so that devices in the cycle probe properly. 1895 * 1896 * If the supplier's parent is dependent on the consumer, then the 1897 * consumer and supplier have a cyclic dependency. Since fw_devlink 1898 * can't tell which of the inferred dependencies are incorrect, don't 1899 * enforce probe ordering between any of the devices in this cyclic 1900 * dependency. Do this by relaxing all the fw_devlink device links in 1901 * this cycle and by treating the fwnode link between the consumer and 1902 * the supplier as an invalid dependency. 1903 */ 1904 sup_dev = fwnode_get_next_parent_dev(sup_handle); 1905 if (sup_dev && device_is_dependent(con, sup_dev)) { 1906 dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n", 1907 sup_handle, dev_name(sup_dev)); 1908 device_links_write_lock(); 1909 fw_devlink_relax_cycle(con, sup_dev); 1910 device_links_write_unlock(); 1911 ret = -EINVAL; 1912 } else { 1913 /* 1914 * Can't check for cycles or no cycles. So let's try 1915 * again later. 1916 */ 1917 ret = -EAGAIN; 1918 } 1919 1920 out: 1921 put_device(sup_dev); 1922 return ret; 1923 } 1924 1925 /** 1926 * __fw_devlink_link_to_consumers - Create device links to consumers of a device 1927 * @dev: Device that needs to be linked to its consumers 1928 * 1929 * This function looks at all the consumer fwnodes of @dev and creates device 1930 * links between the consumer device and @dev (supplier). 1931 * 1932 * If the consumer device has not been added yet, then this function creates a 1933 * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device 1934 * of the consumer fwnode. 
This is necessary to make sure @dev doesn't get a 1935 * sync_state() callback before the real consumer device gets to be added and 1936 * then probed. 1937 * 1938 * Once device links are created from the real consumer to @dev (supplier), the 1939 * fwnode links are deleted. 1940 */ 1941 static void __fw_devlink_link_to_consumers(struct device *dev) 1942 { 1943 struct fwnode_handle *fwnode = dev->fwnode; 1944 struct fwnode_link *link, *tmp; 1945 1946 list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { 1947 u32 dl_flags = fw_devlink_get_flags(); 1948 struct device *con_dev; 1949 bool own_link = true; 1950 int ret; 1951 1952 con_dev = get_dev_from_fwnode(link->consumer); 1953 /* 1954 * If consumer device is not available yet, make a "proxy" 1955 * SYNC_STATE_ONLY link from the consumer's parent device to 1956 * the supplier device. This is necessary to make sure the 1957 * supplier doesn't get a sync_state() callback before the real 1958 * consumer can create a device link to the supplier. 1959 * 1960 * This proxy link step is needed to handle the case where the 1961 * consumer's parent device is added before the supplier. 1962 */ 1963 if (!con_dev) { 1964 con_dev = fwnode_get_next_parent_dev(link->consumer); 1965 /* 1966 * However, if the consumer's parent device is also the 1967 * parent of the supplier, don't create a 1968 * consumer-supplier link from the parent to its child 1969 * device. Such a dependency is impossible. 
1970 */ 1971 if (con_dev && 1972 fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) { 1973 put_device(con_dev); 1974 con_dev = NULL; 1975 } else { 1976 own_link = false; 1977 dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; 1978 } 1979 } 1980 1981 if (!con_dev) 1982 continue; 1983 1984 ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags); 1985 put_device(con_dev); 1986 if (!own_link || ret == -EAGAIN) 1987 continue; 1988 1989 __fwnode_link_del(link); 1990 } 1991 } 1992 1993 /** 1994 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device 1995 * @dev: The consumer device that needs to be linked to its suppliers 1996 * @fwnode: Root of the fwnode tree that is used to create device links 1997 * 1998 * This function looks at all the supplier fwnodes of fwnode tree rooted at 1999 * @fwnode and creates device links between @dev (consumer) and all the 2000 * supplier devices of the entire fwnode tree at @fwnode. 2001 * 2002 * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev 2003 * and the real suppliers of @dev. Once these device links are created, the 2004 * fwnode links are deleted. When such device links are successfully created, 2005 * this function is called recursively on those supplier devices. This is 2006 * needed to detect and break some invalid cycles in fwnode links. See 2007 * fw_devlink_create_devlink() for more details. 2008 * 2009 * In addition, it also looks at all the suppliers of the entire fwnode tree 2010 * because some of the child devices of @dev that have not been added yet 2011 * (because @dev hasn't probed) might already have their suppliers added to 2012 * driver core. So, this function creates SYNC_STATE_ONLY device links between 2013 * @dev (consumer) and these suppliers to make sure they don't execute their 2014 * sync_state() callbacks before these child devices have a chance to create 2015 * their device links. 
The fwnode links that correspond to the child devices 2016 * aren't delete because they are needed later to create the device links 2017 * between the real consumer and supplier devices. 2018 */ 2019 static void __fw_devlink_link_to_suppliers(struct device *dev, 2020 struct fwnode_handle *fwnode) 2021 { 2022 bool own_link = (dev->fwnode == fwnode); 2023 struct fwnode_link *link, *tmp; 2024 struct fwnode_handle *child = NULL; 2025 u32 dl_flags; 2026 2027 if (own_link) 2028 dl_flags = fw_devlink_get_flags(); 2029 else 2030 dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE; 2031 2032 list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { 2033 int ret; 2034 struct device *sup_dev; 2035 struct fwnode_handle *sup = link->supplier; 2036 2037 ret = fw_devlink_create_devlink(dev, sup, dl_flags); 2038 if (!own_link || ret == -EAGAIN) 2039 continue; 2040 2041 __fwnode_link_del(link); 2042 2043 /* If no device link was created, nothing more to do. */ 2044 if (ret) 2045 continue; 2046 2047 /* 2048 * If a device link was successfully created to a supplier, we 2049 * now need to try and link the supplier to all its suppliers. 2050 * 2051 * This is needed to detect and delete false dependencies in 2052 * fwnode links that haven't been converted to a device link 2053 * yet. See comments in fw_devlink_create_devlink() for more 2054 * details on the false dependency. 2055 * 2056 * Without deleting these false dependencies, some devices will 2057 * never probe because they'll keep waiting for their false 2058 * dependency fwnode links to be converted to device links. 2059 */ 2060 sup_dev = get_dev_from_fwnode(sup); 2061 __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode); 2062 put_device(sup_dev); 2063 } 2064 2065 /* 2066 * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of 2067 * all the descendants. This proxy link step is needed to handle the 2068 * case where the supplier is added before the consumer's parent device 2069 * (@dev). 
2070 */ 2071 while ((child = fwnode_get_next_available_child_node(fwnode, child))) 2072 __fw_devlink_link_to_suppliers(dev, child); 2073 } 2074 2075 static void fw_devlink_link_device(struct device *dev) 2076 { 2077 struct fwnode_handle *fwnode = dev->fwnode; 2078 2079 if (!fw_devlink_flags) 2080 return; 2081 2082 fw_devlink_parse_fwtree(fwnode); 2083 2084 mutex_lock(&fwnode_link_lock); 2085 __fw_devlink_link_to_consumers(dev); 2086 __fw_devlink_link_to_suppliers(dev, fwnode); 2087 mutex_unlock(&fwnode_link_lock); 2088 } 2089 2090 /* Device links support end. */ 2091 2092 int (*platform_notify)(struct device *dev) = NULL; 2093 int (*platform_notify_remove)(struct device *dev) = NULL; 2094 static struct kobject *dev_kobj; 2095 struct kobject *sysfs_dev_char_kobj; 2096 struct kobject *sysfs_dev_block_kobj; 2097 2098 static DEFINE_MUTEX(device_hotplug_lock); 2099 2100 void lock_device_hotplug(void) 2101 { 2102 mutex_lock(&device_hotplug_lock); 2103 } 2104 2105 void unlock_device_hotplug(void) 2106 { 2107 mutex_unlock(&device_hotplug_lock); 2108 } 2109 2110 int lock_device_hotplug_sysfs(void) 2111 { 2112 if (mutex_trylock(&device_hotplug_lock)) 2113 return 0; 2114 2115 /* Avoid busy looping (5 ms of sleep should do). 
 */
	msleep(5);
	return restart_syscall();
}

#ifdef CONFIG_BLOCK
/* True unless @dev is a block-device partition. */
static inline int device_is_not_partition(struct device *dev)
{
	return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
	return 1;
}
#endif

/* Notify platform interest (ACPI, software nodes, legacy hook) of a new device. */
static void device_platform_notify(struct device *dev)
{
	acpi_device_notify(dev);

	software_node_notify(dev);

	if (platform_notify)
		platform_notify(dev);
}

/* Mirror of device_platform_notify() for device removal. */
static void device_platform_notify_remove(struct device *dev)
{
	acpi_device_notify_remove(dev);

	software_node_notify_remove(dev);

	if (platform_notify_remove)
		platform_notify_remove(dev);
}

/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device.  If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to.  If it is not attached to a bus either, an empty
 * string will be returned.
 */
const char *dev_driver_string(const struct device *dev)
{
	struct device_driver *drv;

	/* dev->driver can change to NULL underneath us because of unbinding,
	 * so be careful about accessing it.  dev->bus and dev->class should
	 * never change once they are set, so they don't need special care.
	 */
	drv = READ_ONCE(dev->driver);
	return drv ?
drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/* sysfs ->show dispatch: route to the device_attribute's show() method. */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	/* A show() result must fit in one page; flag buggy attributes. */
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
				dev_attr->show);
	}
	return ret;
}

/* sysfs ->store dispatch: route to the device_attribute's store() method. */
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

/* Generic store() helper for attributes backed by an unsigned long. */
ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	unsigned long new;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

/* Generic show() helper for unsigned long attributes; note hex (%lx) output. */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);

ssize_t
device_store_int(struct device *dev,
		 struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	long new;

	ret = kstrtol(buf, 0, &new);
	if (ret)
		return ret;

	/* Parse as long, then range-check so the int store can't truncate. */
	if (new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

/* Generic show() helper for int attributes (decimal output). */
ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

/* Generic store() helper for bool attributes, parsed via strtobool(). */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

/* Generic show() helper for bool attributes (prints 0 or 1). */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);

/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	/* Prefer the most specific release method available. */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}

/* Return the sysfs namespace tag for class devices whose class is namespaced. */
static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

/* Let the class override the owning uid/gid of the device's sysfs entries. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};


/* Only devices (with a bus or a class) generate uevents. */
static int dev_uevent_filter(struct kobject *kobj)
{
	const struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);
		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

/* Subsystem name for the uevent: prefer the bus name, then the class name. */
static const char *dev_uevent_name(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}

static int dev_uevent(struct kobject *kobj, struct
kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_driver *driver;
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	/*
	 * dev->driver can change to NULL underneath us because of unbinding
	 * (the same hazard dev_driver_string() guards against), so snapshot
	 * it once and use only the snapshot for both check and dereference.
	 */
	driver = READ_ONCE(dev->driver);
	if (driver)
		add_uevent_var(env, "DRIVER=%s", driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__,
				 retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};

/* Show the uevent environment that would be generated for this device. */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(&dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(&dev->kobj, env);
	/* On failure fall through and report whatever was emitted (nothing). */
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}

/* Write an action string to synthesize a uevent (parsed by kobject_synth_uevent()). */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);

/* "online" reads as 1 unless the device has been taken offline. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val = !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

/* Bring the device online/offline under the hotplug lock. */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

/* "removable": textual form of dev->removable. */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	const char *loc;

	switch (dev->removable) {
	case DEVICE_REMOVABLE:
		loc = "removable";
		break;
	case DEVICE_FIXED:
		loc = "fixed";
		break;
	default:
		loc = "unknown";
	}
	return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);

int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

/* devres payload: either a single group or a NULL-terminated group list. */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres_release() match callback: compare against the stored group pointer. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres destructor: remove a single managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

static void devm_attr_groups_remove(struct device
*dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}

/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev: The device to create the group for
 * @grp: The attribute group to create
 *
 * This function creates a group for the first time.  It will explicitly
 * warn and error if any of the attribute files being created already exist.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	/* Record the group only after creation succeeded. */
	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);

/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev: device to remove the group from
 * @grp: group to remove
 *
 * This function removes a group of attributes from a device. The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);

/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev: The device to create the group for
 * @groups: The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.  If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state when this
 * function was called.  It will explicitly warn and error if any of the
 * attribute files being created already exist.
 *
 * Returns 0 on success or error code from sysfs_create_group on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);

/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev: The device for the groups to be removed from
 * @groups: NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);

/*
 * Create all standard sysfs attributes for a device being added: class
 * groups, type groups, the device's own groups, plus the conditional
 * "online", "waiting_for_supplier", "removable" and physical-location
 * attributes.  Fully unwinds on failure.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	if (dev_removable_is_valid(dev)) {
		error = device_create_file(dev, &dev_attr_removable);
		if (error)
			goto err_remove_dev_waiting_for_supplier;
	}

	if (dev_add_physical_location(dev)) {
		error = device_add_group(dev,
			&dev_attr_physical_location_group);
		if (error)
			goto err_remove_dev_removable;
	}

	return 0;

err_remove_dev_removable:
	device_remove_file(dev, &dev_attr_removable);
err_remove_dev_waiting_for_supplier:
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}

/* Tear down everything device_add_attrs() created. */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	if (dev->physical_location) {
		device_remove_group(dev, &dev_attr_physical_location_group);
		kfree(dev->physical_location);
	}

	device_remove_file(dev, &dev_attr_removable);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}

/* "dev" attribute: the device's major:minor number. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		/* Catch attributes whose mode disagrees with their methods. */
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
			"Attribute %s: write permission without 'store'\n",
			attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
			"Attribute %s: read permission without 'show'\n",
			attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);

/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);

/**
 * device_remove_file_self - remove sysfs attribute file from its own method.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * See kernfs_remove_self() for details.
 */
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr)
{
	if (dev)
		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
	else
		return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);

/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
int device_create_bin_file(struct device *dev,
			   const struct bin_attribute *attr)
{
	int error = -EINVAL;
	if (dev)
		error = sysfs_create_bin_file(&dev->kobj, attr);
	return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);

/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

/* klist node get/put hooks: hold a device reference while the node is in use. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}

/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields.
In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value.  The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, NUMA_NO_NODE);
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
#ifdef CONFIG_SWIOTLB
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);

/*
 * Lazily created /sys/devices/virtual directory for parentless class
 * devices.  NOTE(review): the lazy creation here is not obviously
 * serialized against concurrent callers — confirm callers' locking.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

/* A "glue" directory kobject that groups class devices under a parent. */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};

/* Allocate and register a glue directory for @class under @parent_kobj. */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}

static DEFINE_MUTEX(gdp_mutex);

/* Pick the sysfs parent kobject for a device being added. */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}

/* True if @kobj is one of @dev's class glue directories. */
static inline bool live_in_glue_dir(struct kobject *kobj,
				    struct device *dev)
{
	if (!kobj || !dev->class ||
	    kobj->kset != &dev->class->p->glue_dirs)
		return false;
	return true;
}

static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}

/**
 * kobject_has_children - Returns whether a kobject has children.
 * @kobj: the object to test
 *
 * This will return whether a kobject has other kobjects as children.
 *
 * It does NOT account for the presence of attribute files, only sub
 * directories. It also assumes there is no concurrent addition or
 * removal of such children, and thus relies on external locking.
 */
static inline bool kobject_has_children(struct kobject *kobj)
{
	WARN_ON_ONCE(kref_read(&kobj->kref) == 0);

	return kobj->sd && kobj->sd->dir.subdirs;
}

/*
 * make sure cleaning up dir as the last step, we need to make
 * sure .release handler of kobject is run with holding the
 * global lock
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/**
	 * There is a race condition between removing glue directory
	 * and adding a new device under the glue directory.
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                               // sd is freed
	 *                                               kernfs_new_node(sd)
	 *                                                 kernfs_get(glue_dir)
	 *                                                 kernfs_add_one()
	 *                                                 kernfs_put()
	 *
	 * Before CPU1 remove last child device under glue dir, if CPU2 add
	 * a new device under glue dir, the glue_dir kobject reference count
	 * will be increase to 2 in kobject_get(k). And CPU2 has been called
	 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
	 * and sysfs_put(). This result in glue_dir->sd is freed.
	 *
	 * Then the CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that
	 * kernfs_node for glue_dir is released in CPU1 only when refcount
	 * for glue_dir kobj is 1.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}

/* Create "of_node", "subsystem", "device" and class-directory symlinks. */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}

static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent &&
device_is_not_partition(dev)) 3274 sysfs_remove_link(&dev->kobj, "device"); 3275 sysfs_remove_link(&dev->kobj, "subsystem"); 3276 #ifdef CONFIG_BLOCK 3277 if (sysfs_deprecated && dev->class == &block_class) 3278 return; 3279 #endif 3280 sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); 3281 } 3282 3283 /** 3284 * dev_set_name - set a device name 3285 * @dev: device 3286 * @fmt: format string for the device's name 3287 */ 3288 int dev_set_name(struct device *dev, const char *fmt, ...) 3289 { 3290 va_list vargs; 3291 int err; 3292 3293 va_start(vargs, fmt); 3294 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); 3295 va_end(vargs); 3296 return err; 3297 } 3298 EXPORT_SYMBOL_GPL(dev_set_name); 3299 3300 /** 3301 * device_to_dev_kobj - select a /sys/dev/ directory for the device 3302 * @dev: device 3303 * 3304 * By default we select char/ for new entries. Setting class->dev_obj 3305 * to NULL prevents an entry from being created. class->dev_kobj must 3306 * be set (or cleared) before any devices are registered to the class 3307 * otherwise device_create_sys_dev_entry() and 3308 * device_remove_sys_dev_entry() will disagree about the presence of 3309 * the link. 
3310 */ 3311 static struct kobject *device_to_dev_kobj(struct device *dev) 3312 { 3313 struct kobject *kobj; 3314 3315 if (dev->class) 3316 kobj = dev->class->dev_kobj; 3317 else 3318 kobj = sysfs_dev_char_kobj; 3319 3320 return kobj; 3321 } 3322 3323 static int device_create_sys_dev_entry(struct device *dev) 3324 { 3325 struct kobject *kobj = device_to_dev_kobj(dev); 3326 int error = 0; 3327 char devt_str[15]; 3328 3329 if (kobj) { 3330 format_dev_t(devt_str, dev->devt); 3331 error = sysfs_create_link(kobj, &dev->kobj, devt_str); 3332 } 3333 3334 return error; 3335 } 3336 3337 static void device_remove_sys_dev_entry(struct device *dev) 3338 { 3339 struct kobject *kobj = device_to_dev_kobj(dev); 3340 char devt_str[15]; 3341 3342 if (kobj) { 3343 format_dev_t(devt_str, dev->devt); 3344 sysfs_remove_link(kobj, devt_str); 3345 } 3346 } 3347 3348 static int device_private_init(struct device *dev) 3349 { 3350 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); 3351 if (!dev->p) 3352 return -ENOMEM; 3353 dev->p->device = dev; 3354 klist_init(&dev->p->klist_children, klist_children_get, 3355 klist_children_put); 3356 INIT_LIST_HEAD(&dev->p->deferred_probe); 3357 return 0; 3358 } 3359 3360 /** 3361 * device_add - add device to device hierarchy. 3362 * @dev: device. 3363 * 3364 * This is part 2 of device_register(), though may be called 3365 * separately _iff_ device_initialize() has been called separately. 3366 * 3367 * This adds @dev to the kobject hierarchy via kobject_add(), adds it 3368 * to the global and sibling lists for the device, then 3369 * adds it to the other relevant subsystems of the driver model. 3370 * 3371 * Do not call this routine or device_register() more than once for 3372 * any device structure. The driver model core is not designed to work 3373 * with devices that get unregistered and then spring back to life. 3374 * (Among other things, it's very hard to guarantee that all references 3375 * to the previous incarnation of @dev have been dropped.) 
Allocate 3376 * and register a fresh new struct device instead. 3377 * 3378 * NOTE: _Never_ directly free @dev after calling this function, even 3379 * if it returned an error! Always use put_device() to give up your 3380 * reference instead. 3381 * 3382 * Rule of thumb is: if device_add() succeeds, you should call 3383 * device_del() when you want to get rid of it. If device_add() has 3384 * *not* succeeded, use *only* put_device() to drop the reference 3385 * count. 3386 */ 3387 int device_add(struct device *dev) 3388 { 3389 struct device *parent; 3390 struct kobject *kobj; 3391 struct class_interface *class_intf; 3392 int error = -EINVAL; 3393 struct kobject *glue_dir = NULL; 3394 3395 dev = get_device(dev); 3396 if (!dev) 3397 goto done; 3398 3399 if (!dev->p) { 3400 error = device_private_init(dev); 3401 if (error) 3402 goto done; 3403 } 3404 3405 /* 3406 * for statically allocated devices, which should all be converted 3407 * some day, we need to initialize the name. We prevent reading back 3408 * the name, and force the use of dev_name() 3409 */ 3410 if (dev->init_name) { 3411 dev_set_name(dev, "%s", dev->init_name); 3412 dev->init_name = NULL; 3413 } 3414 3415 /* subsystems can specify simple device enumeration */ 3416 if (!dev_name(dev) && dev->bus && dev->bus->dev_name) 3417 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); 3418 3419 if (!dev_name(dev)) { 3420 error = -EINVAL; 3421 goto name_error; 3422 } 3423 3424 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 3425 3426 parent = get_device(dev->parent); 3427 kobj = get_device_parent(dev, parent); 3428 if (IS_ERR(kobj)) { 3429 error = PTR_ERR(kobj); 3430 goto parent_error; 3431 } 3432 if (kobj) 3433 dev->kobj.parent = kobj; 3434 3435 /* use parent numa_node */ 3436 if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) 3437 set_dev_node(dev, dev_to_node(parent)); 3438 3439 /* first, register with generic layer. 
*/ 3440 /* we require the name to be set before, and pass NULL */ 3441 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); 3442 if (error) { 3443 glue_dir = get_glue_dir(dev); 3444 goto Error; 3445 } 3446 3447 /* notify platform of device entry */ 3448 device_platform_notify(dev); 3449 3450 error = device_create_file(dev, &dev_attr_uevent); 3451 if (error) 3452 goto attrError; 3453 3454 error = device_add_class_symlinks(dev); 3455 if (error) 3456 goto SymlinkError; 3457 error = device_add_attrs(dev); 3458 if (error) 3459 goto AttrsError; 3460 error = bus_add_device(dev); 3461 if (error) 3462 goto BusError; 3463 error = dpm_sysfs_add(dev); 3464 if (error) 3465 goto DPMError; 3466 device_pm_add(dev); 3467 3468 if (MAJOR(dev->devt)) { 3469 error = device_create_file(dev, &dev_attr_dev); 3470 if (error) 3471 goto DevAttrError; 3472 3473 error = device_create_sys_dev_entry(dev); 3474 if (error) 3475 goto SysEntryError; 3476 3477 devtmpfs_create_node(dev); 3478 } 3479 3480 /* Notify clients of device addition. This call must come 3481 * after dpm_sysfs_add() and before kobject_uevent(). 3482 */ 3483 if (dev->bus) 3484 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 3485 BUS_NOTIFY_ADD_DEVICE, dev); 3486 3487 kobject_uevent(&dev->kobj, KOBJ_ADD); 3488 3489 /* 3490 * Check if any of the other devices (consumers) have been waiting for 3491 * this device (supplier) to be added so that they can create a device 3492 * link to it. 3493 * 3494 * This needs to happen after device_pm_add() because device_link_add() 3495 * requires the supplier be registered before it's called. 3496 * 3497 * But this also needs to happen before bus_probe_device() to make sure 3498 * waiting consumers can link to it before the driver is bound to the 3499 * device and the driver sync_state callback is called for this device. 
3500 */ 3501 if (dev->fwnode && !dev->fwnode->dev) { 3502 dev->fwnode->dev = dev; 3503 fw_devlink_link_device(dev); 3504 } 3505 3506 bus_probe_device(dev); 3507 3508 /* 3509 * If all driver registration is done and a newly added device doesn't 3510 * match with any driver, don't block its consumers from probing in 3511 * case the consumer device is able to operate without this supplier. 3512 */ 3513 if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match) 3514 fw_devlink_unblock_consumers(dev); 3515 3516 if (parent) 3517 klist_add_tail(&dev->p->knode_parent, 3518 &parent->p->klist_children); 3519 3520 if (dev->class) { 3521 mutex_lock(&dev->class->p->mutex); 3522 /* tie the class to the device */ 3523 klist_add_tail(&dev->p->knode_class, 3524 &dev->class->p->klist_devices); 3525 3526 /* notify any interfaces that the device is here */ 3527 list_for_each_entry(class_intf, 3528 &dev->class->p->interfaces, node) 3529 if (class_intf->add_dev) 3530 class_intf->add_dev(dev, class_intf); 3531 mutex_unlock(&dev->class->p->mutex); 3532 } 3533 done: 3534 put_device(dev); 3535 return error; 3536 SysEntryError: 3537 if (MAJOR(dev->devt)) 3538 device_remove_file(dev, &dev_attr_dev); 3539 DevAttrError: 3540 device_pm_remove(dev); 3541 dpm_sysfs_remove(dev); 3542 DPMError: 3543 bus_remove_device(dev); 3544 BusError: 3545 device_remove_attrs(dev); 3546 AttrsError: 3547 device_remove_class_symlinks(dev); 3548 SymlinkError: 3549 device_remove_file(dev, &dev_attr_uevent); 3550 attrError: 3551 device_platform_notify_remove(dev); 3552 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 3553 glue_dir = get_glue_dir(dev); 3554 kobject_del(&dev->kobj); 3555 Error: 3556 cleanup_glue_dir(dev, glue_dir); 3557 parent_error: 3558 put_device(parent); 3559 name_error: 3560 kfree(dev->p); 3561 dev->p = NULL; 3562 goto done; 3563 } 3564 EXPORT_SYMBOL_GPL(device_add); 3565 3566 /** 3567 * device_register - register a device with the system. 
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if you
 * have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);

/**
 * get_device - increment reference count for device.
 * @dev: device.
 *
 * This simply forwards the call to kobject_get(), though
 * we do take care to provide for the case that we get a NULL
 * pointer passed in.
 */
struct device *get_device(struct device *dev)
{
	return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);

/**
 * put_device - decrement reference count.
 * @dev: device in question.
 */
void put_device(struct device *dev)
{
	/* might_sleep(); */
	if (dev)
		kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);

/**
 * kill_device - mark a device as dead
 * @dev: device to mark; the device lock must be held by the caller
 *
 * Returns %true if this call marked the device dead, %false if it
 * was already dead.
 */
bool kill_device(struct device *dev)
{
	/*
	 * Require the device lock and set the "dead" flag to guarantee that
	 * the update behavior is consistent with the other bitfields near
	 * it and that we cannot have an asynchronous probe routine trying
	 * to run while we are tearing out the bus/class/sysfs from
	 * underneath the device.
	 */
	device_lock_assert(dev);

	if (dev->p->dead)
		return false;
	dev->p->dead = true;
	return true;
}
EXPORT_SYMBOL_GPL(kill_device);

/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence. This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
	unsigned int noio_flag;

	/* Mark the device dead under its lock so async probes cannot race. */
	device_lock(dev);
	kill_device(dev);
	device_unlock(dev);

	/* Detach from the fwnode only if we are the device it points at. */
	if (dev->fwnode && dev->fwnode->dev == dev)
		dev->fwnode->dev = NULL;

	/* Notify clients of device removal. This call must come
	 * before dpm_sysfs_remove().
	 */
	noio_flag = memalloc_noio_save();
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);

	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &dev_attr_dev);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->p->knode_class);
		mutex_unlock(&dev->class->p->mutex);
	}
	device_remove_file(dev, &dev_attr_uevent);
	device_remove_attrs(dev);
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_platform_notify_remove(dev);
	device_links_purge(dev);

	/* Second notification, after the device has been torn down. */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
	cleanup_glue_dir(dev, glue_dir);
	memalloc_noio_restore(noio_flag);
	put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);

/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above. Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(device_unregister);

/* Step an open klist iterator backwards; return the device or NULL at end. */
static struct device *prev_device(struct klist_iter *i)
{
	struct klist_node *n = klist_prev(i);
	struct device *dev = NULL;
	struct device_private *p;

	if (n) {
		p = to_device_private_parent(n);
		dev = p->device;
	}
	return dev;
}

/* Step an open klist iterator forwards; return the device or NULL at end. */
static struct device *next_device(struct klist_iter *i)
{
	struct klist_node *n = klist_next(i);
	struct device *dev = NULL;
	struct device_private *p;

	if (n) {
		p = to_device_private_parent(n);
		dev = p->device;
	}
	return dev;
}

/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @uid: returned file owner
 * @gid: returned file group
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.
 * Non-default names may need to allocate a memory to compose
 * a name. This memory is returned in tmp and needs to be
 * freed by the caller.
 */
const char *device_get_devnode(struct device *dev,
			       umode_t *mode, kuid_t *uid, kgid_t *gid,
			       const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode, uid, gid);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	s = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!s)
		return NULL;	/* allocation failure; caller must handle NULL */
	strreplace(s, '!', '/');
	return *tmp = s;
}

/**
 * device_for_each_child - device child iterator.
 * @parent: parent struct device.
 * @fn: function to be called for each device.
 * @data: data for the callback.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
 */
int device_for_each_child(struct device *parent, void *data,
			  int (*fn)(struct device *dev, void *data))
{
	struct klist_iter i;
	struct device *child;
	int error = 0;

	/* No private data means no child list was ever initialized. */
	if (!parent->p)
		return 0;

	klist_iter_init(&parent->p->klist_children, &i);
	while (!error && (child = next_device(&i)))
		error = fn(child, data);
	klist_iter_exit(&i);
	return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child);

/**
 * device_for_each_child_reverse - device child iterator in reversed order.
 * @parent: parent struct device.
 * @fn: function to be called for each device.
 * @data: data for the callback.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
 */
int device_for_each_child_reverse(struct device *parent, void *data,
				  int (*fn)(struct device *dev, void *data))
{
	struct klist_iter i;
	struct device *child;
	int error = 0;

	/* No private data means no child list was ever initialized. */
	if (!parent->p)
		return 0;

	klist_iter_init(&parent->p->klist_children, &i);
	/*
	 * NOTE(review): the condition order differs from
	 * device_for_each_child() — here the iterator is advanced once
	 * more even after @fn returned an error. Confirm this is
	 * intentional before unifying the two loops.
	 */
	while ((child = prev_device(&i)) && !error)
		error = fn(child, data);
	klist_iter_exit(&i);
	return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);

/**
 * device_find_child - device iterator for locating a particular device.
 * @parent: parent struct device
 * @match: Callback function to check device
 * @data: Data to pass to match function
 *
 * This is similar to the device_for_each_child() function above, but it
 * returns a reference to a device that is 'found' for later use, as
 * determined by the @match callback.
 *
 * The callback should return 0 if the device doesn't match and non-zero
 * if it does. If the callback returns non-zero and a reference to the
 * current device can be obtained, this function will return to the caller
 * and not iterate over any more devices.
 *
 * NOTE: you will need to drop the reference with put_device() after use.
3877 */ 3878 struct device *device_find_child(struct device *parent, void *data, 3879 int (*match)(struct device *dev, void *data)) 3880 { 3881 struct klist_iter i; 3882 struct device *child; 3883 3884 if (!parent) 3885 return NULL; 3886 3887 klist_iter_init(&parent->p->klist_children, &i); 3888 while ((child = next_device(&i))) 3889 if (match(child, data) && get_device(child)) 3890 break; 3891 klist_iter_exit(&i); 3892 return child; 3893 } 3894 EXPORT_SYMBOL_GPL(device_find_child); 3895 3896 /** 3897 * device_find_child_by_name - device iterator for locating a child device. 3898 * @parent: parent struct device 3899 * @name: name of the child device 3900 * 3901 * This is similar to the device_find_child() function above, but it 3902 * returns a reference to a device that has the name @name. 3903 * 3904 * NOTE: you will need to drop the reference with put_device() after use. 3905 */ 3906 struct device *device_find_child_by_name(struct device *parent, 3907 const char *name) 3908 { 3909 struct klist_iter i; 3910 struct device *child; 3911 3912 if (!parent) 3913 return NULL; 3914 3915 klist_iter_init(&parent->p->klist_children, &i); 3916 while ((child = next_device(&i))) 3917 if (sysfs_streq(dev_name(child), name) && get_device(child)) 3918 break; 3919 klist_iter_exit(&i); 3920 return child; 3921 } 3922 EXPORT_SYMBOL_GPL(device_find_child_by_name); 3923 3924 static int match_any(struct device *dev, void *unused) 3925 { 3926 return 1; 3927 } 3928 3929 /** 3930 * device_find_any_child - device iterator for locating a child device, if any. 3931 * @parent: parent struct device 3932 * 3933 * This is similar to the device_find_child() function above, but it 3934 * returns a reference to a child device, if any. 3935 * 3936 * NOTE: you will need to drop the reference with put_device() after use. 
 */
struct device *device_find_any_child(struct device *parent)
{
	return device_find_child(parent, NULL, match_any);
}
EXPORT_SYMBOL_GPL(device_find_any_child);

/*
 * Create the common sysfs skeleton: the "devices" kset and the
 * /sys/dev, /sys/dev/block and /sys/dev/char kobjects.
 */
int __init devices_init(void)
{
	devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
	if (!devices_kset)
		return -ENOMEM;
	dev_kobj = kobject_create_and_add("dev", NULL);
	if (!dev_kobj)
		goto dev_kobj_err;
	sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
	if (!sysfs_dev_block_kobj)
		goto block_kobj_err;
	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
	if (!sysfs_dev_char_kobj)
		goto char_kobj_err;

	return 0;

	/* Unwind in reverse order of creation. */
 char_kobj_err:
	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
	kobject_put(dev_kobj);
 dev_kobj_err:
	kset_unregister(devices_kset);
	return -ENOMEM;
}

/*
 * Recursive helper: returns -EBUSY if @dev or any descendant supports
 * offline but is still online, 0 otherwise.
 */
static int device_check_offline(struct device *dev, void *not_used)
{
	int ret;

	ret = device_for_each_child(dev, NULL, device_check_offline);
	if (ret)
		return ret;

	return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
}

/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to prepare
 * the device for a subsequent hot-removal. If that succeeds, the device must
 * not be used until either it is removed or its bus type's .online() callback
 * is executed.
 *
 * Call under device_hotplug_lock.
3991 */ 3992 int device_offline(struct device *dev) 3993 { 3994 int ret; 3995 3996 if (dev->offline_disabled) 3997 return -EPERM; 3998 3999 ret = device_for_each_child(dev, NULL, device_check_offline); 4000 if (ret) 4001 return ret; 4002 4003 device_lock(dev); 4004 if (device_supports_offline(dev)) { 4005 if (dev->offline) { 4006 ret = 1; 4007 } else { 4008 ret = dev->bus->offline(dev); 4009 if (!ret) { 4010 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 4011 dev->offline = true; 4012 } 4013 } 4014 } 4015 device_unlock(dev); 4016 4017 return ret; 4018 } 4019 4020 /** 4021 * device_online - Put the device back online after successful device_offline(). 4022 * @dev: Device to be put back online. 4023 * 4024 * If device_offline() has been successfully executed for @dev, but the device 4025 * has not been removed subsequently, execute its bus type's .online() callback 4026 * to indicate that the device can be used again. 4027 * 4028 * Call under device_hotplug_lock. 4029 */ 4030 int device_online(struct device *dev) 4031 { 4032 int ret = 0; 4033 4034 device_lock(dev); 4035 if (device_supports_offline(dev)) { 4036 if (dev->offline) { 4037 ret = dev->bus->online(dev); 4038 if (!ret) { 4039 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 4040 dev->offline = false; 4041 } 4042 } else { 4043 ret = 1; 4044 } 4045 } 4046 device_unlock(dev); 4047 4048 return ret; 4049 } 4050 4051 struct root_device { 4052 struct device dev; 4053 struct module *owner; 4054 }; 4055 4056 static inline struct root_device *to_root_device(struct device *d) 4057 { 4058 return container_of(d, struct root_device, dev); 4059 } 4060 4061 static void root_device_release(struct device *dev) 4062 { 4063 kfree(to_root_device(dev)); 4064 } 4065 4066 /** 4067 * __root_device_register - allocate and register a root device 4068 * @name: root device name 4069 * @owner: owner module of the root device, usually THIS_MODULE 4070 * 4071 * This function allocates a root device and registers it 4072 * using device_register(). 
In order to free the returned 4073 * device, use root_device_unregister(). 4074 * 4075 * Root devices are dummy devices which allow other devices 4076 * to be grouped under /sys/devices. Use this function to 4077 * allocate a root device and then use it as the parent of 4078 * any device which should appear under /sys/devices/{name} 4079 * 4080 * The /sys/devices/{name} directory will also contain a 4081 * 'module' symlink which points to the @owner directory 4082 * in sysfs. 4083 * 4084 * Returns &struct device pointer on success, or ERR_PTR() on error. 4085 * 4086 * Note: You probably want to use root_device_register(). 4087 */ 4088 struct device *__root_device_register(const char *name, struct module *owner) 4089 { 4090 struct root_device *root; 4091 int err = -ENOMEM; 4092 4093 root = kzalloc(sizeof(struct root_device), GFP_KERNEL); 4094 if (!root) 4095 return ERR_PTR(err); 4096 4097 err = dev_set_name(&root->dev, "%s", name); 4098 if (err) { 4099 kfree(root); 4100 return ERR_PTR(err); 4101 } 4102 4103 root->dev.release = root_device_release; 4104 4105 err = device_register(&root->dev); 4106 if (err) { 4107 put_device(&root->dev); 4108 return ERR_PTR(err); 4109 } 4110 4111 #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ 4112 if (owner) { 4113 struct module_kobject *mk = &owner->mkobj; 4114 4115 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); 4116 if (err) { 4117 device_unregister(&root->dev); 4118 return ERR_PTR(err); 4119 } 4120 root->owner = owner; 4121 } 4122 #endif 4123 4124 return &root->dev; 4125 } 4126 EXPORT_SYMBOL_GPL(__root_device_register); 4127 4128 /** 4129 * root_device_unregister - unregister and free a root device 4130 * @dev: device going away 4131 * 4132 * This function unregisters and cleans up a device that was created by 4133 * root_device_register(). 
4134 */ 4135 void root_device_unregister(struct device *dev) 4136 { 4137 struct root_device *root = to_root_device(dev); 4138 4139 if (root->owner) 4140 sysfs_remove_link(&root->dev.kobj, "module"); 4141 4142 device_unregister(dev); 4143 } 4144 EXPORT_SYMBOL_GPL(root_device_unregister); 4145 4146 4147 static void device_create_release(struct device *dev) 4148 { 4149 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 4150 kfree(dev); 4151 } 4152 4153 static __printf(6, 0) struct device * 4154 device_create_groups_vargs(struct class *class, struct device *parent, 4155 dev_t devt, void *drvdata, 4156 const struct attribute_group **groups, 4157 const char *fmt, va_list args) 4158 { 4159 struct device *dev = NULL; 4160 int retval = -ENODEV; 4161 4162 if (class == NULL || IS_ERR(class)) 4163 goto error; 4164 4165 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 4166 if (!dev) { 4167 retval = -ENOMEM; 4168 goto error; 4169 } 4170 4171 device_initialize(dev); 4172 dev->devt = devt; 4173 dev->class = class; 4174 dev->parent = parent; 4175 dev->groups = groups; 4176 dev->release = device_create_release; 4177 dev_set_drvdata(dev, drvdata); 4178 4179 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 4180 if (retval) 4181 goto error; 4182 4183 retval = device_add(dev); 4184 if (retval) 4185 goto error; 4186 4187 return dev; 4188 4189 error: 4190 put_device(dev); 4191 return ERR_PTR(retval); 4192 } 4193 4194 /** 4195 * device_create - creates a device and registers it with sysfs 4196 * @class: pointer to the struct class that this device should be registered to 4197 * @parent: pointer to the parent struct device of this new device, if any 4198 * @devt: the dev_t for the char device to be added 4199 * @drvdata: the data to be added to the device for callbacks 4200 * @fmt: string for the device's name 4201 * 4202 * This function can be used by char device classes. A struct device 4203 * will be created in sysfs, registered to the specified class. 
4204 * 4205 * A "dev" file will be created, showing the dev_t for the device, if 4206 * the dev_t is not 0,0. 4207 * If a pointer to a parent struct device is passed in, the newly created 4208 * struct device will be a child of that device in sysfs. 4209 * The pointer to the struct device will be returned from the call. 4210 * Any further sysfs files that might be required can be created using this 4211 * pointer. 4212 * 4213 * Returns &struct device pointer on success, or ERR_PTR() on error. 4214 * 4215 * Note: the struct class passed to this function must have previously 4216 * been created with a call to class_create(). 4217 */ 4218 struct device *device_create(struct class *class, struct device *parent, 4219 dev_t devt, void *drvdata, const char *fmt, ...) 4220 { 4221 va_list vargs; 4222 struct device *dev; 4223 4224 va_start(vargs, fmt); 4225 dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL, 4226 fmt, vargs); 4227 va_end(vargs); 4228 return dev; 4229 } 4230 EXPORT_SYMBOL_GPL(device_create); 4231 4232 /** 4233 * device_create_with_groups - creates a device and registers it with sysfs 4234 * @class: pointer to the struct class that this device should be registered to 4235 * @parent: pointer to the parent struct device of this new device, if any 4236 * @devt: the dev_t for the char device to be added 4237 * @drvdata: the data to be added to the device for callbacks 4238 * @groups: NULL-terminated list of attribute groups to be created 4239 * @fmt: string for the device's name 4240 * 4241 * This function can be used by char device classes. A struct device 4242 * will be created in sysfs, registered to the specified class. 4243 * Additional attributes specified in the groups parameter will also 4244 * be created automatically. 4245 * 4246 * A "dev" file will be created, showing the dev_t for the device, if 4247 * the dev_t is not 0,0. 
4248 * If a pointer to a parent struct device is passed in, the newly created 4249 * struct device will be a child of that device in sysfs. 4250 * The pointer to the struct device will be returned from the call. 4251 * Any further sysfs files that might be required can be created using this 4252 * pointer. 4253 * 4254 * Returns &struct device pointer on success, or ERR_PTR() on error. 4255 * 4256 * Note: the struct class passed to this function must have previously 4257 * been created with a call to class_create(). 4258 */ 4259 struct device *device_create_with_groups(struct class *class, 4260 struct device *parent, dev_t devt, 4261 void *drvdata, 4262 const struct attribute_group **groups, 4263 const char *fmt, ...) 4264 { 4265 va_list vargs; 4266 struct device *dev; 4267 4268 va_start(vargs, fmt); 4269 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, 4270 fmt, vargs); 4271 va_end(vargs); 4272 return dev; 4273 } 4274 EXPORT_SYMBOL_GPL(device_create_with_groups); 4275 4276 /** 4277 * device_destroy - removes a device that was created with device_create() 4278 * @class: pointer to the struct class that this device was registered with 4279 * @devt: the dev_t of the device that was previously registered 4280 * 4281 * This call unregisters and cleans up a device that was created with a 4282 * call to device_create(). 
4283 */ 4284 void device_destroy(struct class *class, dev_t devt) 4285 { 4286 struct device *dev; 4287 4288 dev = class_find_device_by_devt(class, devt); 4289 if (dev) { 4290 put_device(dev); 4291 device_unregister(dev); 4292 } 4293 } 4294 EXPORT_SYMBOL_GPL(device_destroy); 4295 4296 /** 4297 * device_rename - renames a device 4298 * @dev: the pointer to the struct device to be renamed 4299 * @new_name: the new name of the device 4300 * 4301 * It is the responsibility of the caller to provide mutual 4302 * exclusion between two different calls of device_rename 4303 * on the same device to ensure that new_name is valid and 4304 * won't conflict with other devices. 4305 * 4306 * Note: Don't call this function. Currently, the networking layer calls this 4307 * function, but that will change. The following text from Kay Sievers offers 4308 * some insight: 4309 * 4310 * Renaming devices is racy at many levels, symlinks and other stuff are not 4311 * replaced atomically, and you get a "move" uevent, but it's not easy to 4312 * connect the event to the old and new device. Device nodes are not renamed at 4313 * all, there isn't even support for that in the kernel now. 4314 * 4315 * In the meantime, during renaming, your target name might be taken by another 4316 * driver, creating conflicts. Or the old name is taken directly after you 4317 * renamed it -- then you get events for the same DEVPATH, before you even see 4318 * the "move" event. It's just a mess, and nothing new should ever rely on 4319 * kernel device renaming. Besides that, it's not even implemented now for 4320 * other things than (driver-core wise very simple) network devices. 4321 * 4322 * We are currently about to change network renaming in udev to completely 4323 * disallow renaming of devices in the same namespace as the kernel uses, 4324 * because we can't solve the problems properly, that arise with swapping names 4325 * of multiple interfaces without races. 
Means, renaming of eth[0-9]* will only 4326 * be allowed to some other name than eth[0-9]*, for the aforementioned 4327 * reasons. 4328 * 4329 * Make up a "real" name in the driver before you register anything, or add 4330 * some other attributes for userspace to find the device, or use udev to add 4331 * symlinks -- but never rename kernel devices later, it's a complete mess. We 4332 * don't even want to get into that and try to implement the missing pieces in 4333 * the core. We really have other pieces to fix in the driver core mess. :) 4334 */ 4335 int device_rename(struct device *dev, const char *new_name) 4336 { 4337 struct kobject *kobj = &dev->kobj; 4338 char *old_device_name = NULL; 4339 int error; 4340 4341 dev = get_device(dev); 4342 if (!dev) 4343 return -EINVAL; 4344 4345 dev_dbg(dev, "renaming to %s\n", new_name); 4346 4347 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 4348 if (!old_device_name) { 4349 error = -ENOMEM; 4350 goto out; 4351 } 4352 4353 if (dev->class) { 4354 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, 4355 kobj, old_device_name, 4356 new_name, kobject_namespace(kobj)); 4357 if (error) 4358 goto out; 4359 } 4360 4361 error = kobject_rename(kobj, new_name); 4362 if (error) 4363 goto out; 4364 4365 out: 4366 put_device(dev); 4367 4368 kfree(old_device_name); 4369 4370 return error; 4371 } 4372 EXPORT_SYMBOL_GPL(device_rename); 4373 4374 static int device_move_class_links(struct device *dev, 4375 struct device *old_parent, 4376 struct device *new_parent) 4377 { 4378 int error = 0; 4379 4380 if (old_parent) 4381 sysfs_remove_link(&dev->kobj, "device"); 4382 if (new_parent) 4383 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, 4384 "device"); 4385 return error; 4386 } 4387 4388 /** 4389 * device_move - moves a device to a new parent 4390 * @dev: the pointer to the struct device to be moved 4391 * @new_parent: the new parent of the device (can be NULL) 4392 * @dpm_order: how to reorder the dpm_list 4393 */ 4394 
int device_move(struct device *dev, struct device *new_parent, 4395 enum dpm_order dpm_order) 4396 { 4397 int error; 4398 struct device *old_parent; 4399 struct kobject *new_parent_kobj; 4400 4401 dev = get_device(dev); 4402 if (!dev) 4403 return -EINVAL; 4404 4405 device_pm_lock(); 4406 new_parent = get_device(new_parent); 4407 new_parent_kobj = get_device_parent(dev, new_parent); 4408 if (IS_ERR(new_parent_kobj)) { 4409 error = PTR_ERR(new_parent_kobj); 4410 put_device(new_parent); 4411 goto out; 4412 } 4413 4414 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), 4415 __func__, new_parent ? dev_name(new_parent) : "<NULL>"); 4416 error = kobject_move(&dev->kobj, new_parent_kobj); 4417 if (error) { 4418 cleanup_glue_dir(dev, new_parent_kobj); 4419 put_device(new_parent); 4420 goto out; 4421 } 4422 old_parent = dev->parent; 4423 dev->parent = new_parent; 4424 if (old_parent) 4425 klist_remove(&dev->p->knode_parent); 4426 if (new_parent) { 4427 klist_add_tail(&dev->p->knode_parent, 4428 &new_parent->p->klist_children); 4429 set_dev_node(dev, dev_to_node(new_parent)); 4430 } 4431 4432 if (dev->class) { 4433 error = device_move_class_links(dev, old_parent, new_parent); 4434 if (error) { 4435 /* We ignore errors on cleanup since we're hosed anyway... 
*/ 4436 device_move_class_links(dev, new_parent, old_parent); 4437 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 4438 if (new_parent) 4439 klist_remove(&dev->p->knode_parent); 4440 dev->parent = old_parent; 4441 if (old_parent) { 4442 klist_add_tail(&dev->p->knode_parent, 4443 &old_parent->p->klist_children); 4444 set_dev_node(dev, dev_to_node(old_parent)); 4445 } 4446 } 4447 cleanup_glue_dir(dev, new_parent_kobj); 4448 put_device(new_parent); 4449 goto out; 4450 } 4451 } 4452 switch (dpm_order) { 4453 case DPM_ORDER_NONE: 4454 break; 4455 case DPM_ORDER_DEV_AFTER_PARENT: 4456 device_pm_move_after(dev, new_parent); 4457 devices_kset_move_after(dev, new_parent); 4458 break; 4459 case DPM_ORDER_PARENT_BEFORE_DEV: 4460 device_pm_move_before(new_parent, dev); 4461 devices_kset_move_before(new_parent, dev); 4462 break; 4463 case DPM_ORDER_DEV_LAST: 4464 device_pm_move_last(dev); 4465 devices_kset_move_last(dev); 4466 break; 4467 } 4468 4469 put_device(old_parent); 4470 out: 4471 device_pm_unlock(); 4472 put_device(dev); 4473 return error; 4474 } 4475 EXPORT_SYMBOL_GPL(device_move); 4476 4477 static int device_attrs_change_owner(struct device *dev, kuid_t kuid, 4478 kgid_t kgid) 4479 { 4480 struct kobject *kobj = &dev->kobj; 4481 struct class *class = dev->class; 4482 const struct device_type *type = dev->type; 4483 int error; 4484 4485 if (class) { 4486 /* 4487 * Change the device groups of the device class for @dev to 4488 * @kuid/@kgid. 4489 */ 4490 error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid, 4491 kgid); 4492 if (error) 4493 return error; 4494 } 4495 4496 if (type) { 4497 /* 4498 * Change the device groups of the device type for @dev to 4499 * @kuid/@kgid. 4500 */ 4501 error = sysfs_groups_change_owner(kobj, type->groups, kuid, 4502 kgid); 4503 if (error) 4504 return error; 4505 } 4506 4507 /* Change the device groups of @dev to @kuid/@kgid. 
*/ 4508 error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid); 4509 if (error) 4510 return error; 4511 4512 if (device_supports_offline(dev) && !dev->offline_disabled) { 4513 /* Change online device attributes of @dev to @kuid/@kgid. */ 4514 error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name, 4515 kuid, kgid); 4516 if (error) 4517 return error; 4518 } 4519 4520 return 0; 4521 } 4522 4523 /** 4524 * device_change_owner - change the owner of an existing device. 4525 * @dev: device. 4526 * @kuid: new owner's kuid 4527 * @kgid: new owner's kgid 4528 * 4529 * This changes the owner of @dev and its corresponding sysfs entries to 4530 * @kuid/@kgid. This function closely mirrors how @dev was added via driver 4531 * core. 4532 * 4533 * Returns 0 on success or error code on failure. 4534 */ 4535 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) 4536 { 4537 int error; 4538 struct kobject *kobj = &dev->kobj; 4539 4540 dev = get_device(dev); 4541 if (!dev) 4542 return -EINVAL; 4543 4544 /* 4545 * Change the kobject and the default attributes and groups of the 4546 * ktype associated with it to @kuid/@kgid. 4547 */ 4548 error = sysfs_change_owner(kobj, kuid, kgid); 4549 if (error) 4550 goto out; 4551 4552 /* 4553 * Change the uevent file for @dev to the new owner. The uevent file 4554 * was created in a separate step when @dev got added and we mirror 4555 * that step here. 4556 */ 4557 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid, 4558 kgid); 4559 if (error) 4560 goto out; 4561 4562 /* 4563 * Change the device groups, the device groups associated with the 4564 * device class, and the groups associated with the device type of @dev 4565 * to @kuid/@kgid. 
4566 */ 4567 error = device_attrs_change_owner(dev, kuid, kgid); 4568 if (error) 4569 goto out; 4570 4571 error = dpm_sysfs_change_owner(dev, kuid, kgid); 4572 if (error) 4573 goto out; 4574 4575 #ifdef CONFIG_BLOCK 4576 if (sysfs_deprecated && dev->class == &block_class) 4577 goto out; 4578 #endif 4579 4580 /* 4581 * Change the owner of the symlink located in the class directory of 4582 * the device class associated with @dev which points to the actual 4583 * directory entry for @dev to @kuid/@kgid. This ensures that the 4584 * symlink shows the same permissions as its target. 4585 */ 4586 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj, 4587 dev_name(dev), kuid, kgid); 4588 if (error) 4589 goto out; 4590 4591 out: 4592 put_device(dev); 4593 return error; 4594 } 4595 EXPORT_SYMBOL_GPL(device_change_owner); 4596 4597 /** 4598 * device_shutdown - call ->shutdown() on each device to shutdown. 4599 */ 4600 void device_shutdown(void) 4601 { 4602 struct device *dev, *parent; 4603 4604 wait_for_device_probe(); 4605 device_block_probing(); 4606 4607 cpufreq_suspend(); 4608 4609 spin_lock(&devices_kset->list_lock); 4610 /* 4611 * Walk the devices list backward, shutting down each in turn. 4612 * Beware that device unplug events may also start pulling 4613 * devices offline, even as the system is shutting down. 4614 */ 4615 while (!list_empty(&devices_kset->list)) { 4616 dev = list_entry(devices_kset->list.prev, struct device, 4617 kobj.entry); 4618 4619 /* 4620 * hold reference count of device's parent to 4621 * prevent it from being freed because parent's 4622 * lock is to be held 4623 */ 4624 parent = get_device(dev->parent); 4625 get_device(dev); 4626 /* 4627 * Make sure the device is off the kset list, in the 4628 * event that dev->*->shutdown() doesn't remove it. 
4629 */ 4630 list_del_init(&dev->kobj.entry); 4631 spin_unlock(&devices_kset->list_lock); 4632 4633 /* hold lock to avoid race with probe/release */ 4634 if (parent) 4635 device_lock(parent); 4636 device_lock(dev); 4637 4638 /* Don't allow any more runtime suspends */ 4639 pm_runtime_get_noresume(dev); 4640 pm_runtime_barrier(dev); 4641 4642 if (dev->class && dev->class->shutdown_pre) { 4643 if (initcall_debug) 4644 dev_info(dev, "shutdown_pre\n"); 4645 dev->class->shutdown_pre(dev); 4646 } 4647 if (dev->bus && dev->bus->shutdown) { 4648 if (initcall_debug) 4649 dev_info(dev, "shutdown\n"); 4650 dev->bus->shutdown(dev); 4651 } else if (dev->driver && dev->driver->shutdown) { 4652 if (initcall_debug) 4653 dev_info(dev, "shutdown\n"); 4654 dev->driver->shutdown(dev); 4655 } 4656 4657 device_unlock(dev); 4658 if (parent) 4659 device_unlock(parent); 4660 4661 put_device(dev); 4662 put_device(parent); 4663 4664 spin_lock(&devices_kset->list_lock); 4665 } 4666 spin_unlock(&devices_kset->list_lock); 4667 } 4668 4669 /* 4670 * Device logging functions 4671 */ 4672 4673 #ifdef CONFIG_PRINTK 4674 static void 4675 set_dev_info(const struct device *dev, struct dev_printk_info *dev_info) 4676 { 4677 const char *subsys; 4678 4679 memset(dev_info, 0, sizeof(*dev_info)); 4680 4681 if (dev->class) 4682 subsys = dev->class->name; 4683 else if (dev->bus) 4684 subsys = dev->bus->name; 4685 else 4686 return; 4687 4688 strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem)); 4689 4690 /* 4691 * Add device identifier DEVICE=: 4692 * b12:8 block dev_t 4693 * c127:3 char dev_t 4694 * n8 netdev ifindex 4695 * +sound:card0 subsystem:devname 4696 */ 4697 if (MAJOR(dev->devt)) { 4698 char c; 4699 4700 if (strcmp(subsys, "block") == 0) 4701 c = 'b'; 4702 else 4703 c = 'c'; 4704 4705 snprintf(dev_info->device, sizeof(dev_info->device), 4706 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt)); 4707 } else if (strcmp(subsys, "net") == 0) { 4708 struct net_device *net = to_net_dev(dev); 
4709 4710 snprintf(dev_info->device, sizeof(dev_info->device), 4711 "n%u", net->ifindex); 4712 } else { 4713 snprintf(dev_info->device, sizeof(dev_info->device), 4714 "+%s:%s", subsys, dev_name(dev)); 4715 } 4716 } 4717 4718 int dev_vprintk_emit(int level, const struct device *dev, 4719 const char *fmt, va_list args) 4720 { 4721 struct dev_printk_info dev_info; 4722 4723 set_dev_info(dev, &dev_info); 4724 4725 return vprintk_emit(0, level, &dev_info, fmt, args); 4726 } 4727 EXPORT_SYMBOL(dev_vprintk_emit); 4728 4729 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 4730 { 4731 va_list args; 4732 int r; 4733 4734 va_start(args, fmt); 4735 4736 r = dev_vprintk_emit(level, dev, fmt, args); 4737 4738 va_end(args); 4739 4740 return r; 4741 } 4742 EXPORT_SYMBOL(dev_printk_emit); 4743 4744 static void __dev_printk(const char *level, const struct device *dev, 4745 struct va_format *vaf) 4746 { 4747 if (dev) 4748 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", 4749 dev_driver_string(dev), dev_name(dev), vaf); 4750 else 4751 printk("%s(NULL device *): %pV", level, vaf); 4752 } 4753 4754 void _dev_printk(const char *level, const struct device *dev, 4755 const char *fmt, ...) 4756 { 4757 struct va_format vaf; 4758 va_list args; 4759 4760 va_start(args, fmt); 4761 4762 vaf.fmt = fmt; 4763 vaf.va = &args; 4764 4765 __dev_printk(level, dev, &vaf); 4766 4767 va_end(args); 4768 } 4769 EXPORT_SYMBOL(_dev_printk); 4770 4771 #define define_dev_printk_level(func, kern_level) \ 4772 void func(const struct device *dev, const char *fmt, ...) 
\ 4773 { \ 4774 struct va_format vaf; \ 4775 va_list args; \ 4776 \ 4777 va_start(args, fmt); \ 4778 \ 4779 vaf.fmt = fmt; \ 4780 vaf.va = &args; \ 4781 \ 4782 __dev_printk(kern_level, dev, &vaf); \ 4783 \ 4784 va_end(args); \ 4785 } \ 4786 EXPORT_SYMBOL(func); 4787 4788 define_dev_printk_level(_dev_emerg, KERN_EMERG); 4789 define_dev_printk_level(_dev_alert, KERN_ALERT); 4790 define_dev_printk_level(_dev_crit, KERN_CRIT); 4791 define_dev_printk_level(_dev_err, KERN_ERR); 4792 define_dev_printk_level(_dev_warn, KERN_WARNING); 4793 define_dev_printk_level(_dev_notice, KERN_NOTICE); 4794 define_dev_printk_level(_dev_info, KERN_INFO); 4795 4796 #endif 4797 4798 /** 4799 * dev_err_probe - probe error check and log helper 4800 * @dev: the pointer to the struct device 4801 * @err: error value to test 4802 * @fmt: printf-style format string 4803 * @...: arguments as specified in the format string 4804 * 4805 * This helper implements common pattern present in probe functions for error 4806 * checking: print debug or error message depending if the error value is 4807 * -EPROBE_DEFER and propagate error upwards. 4808 * In case of -EPROBE_DEFER it sets also defer probe reason, which can be 4809 * checked later by reading devices_deferred debugfs attribute. 4810 * It replaces code sequence:: 4811 * 4812 * if (err != -EPROBE_DEFER) 4813 * dev_err(dev, ...); 4814 * else 4815 * dev_dbg(dev, ...); 4816 * return err; 4817 * 4818 * with:: 4819 * 4820 * return dev_err_probe(dev, err, ...); 4821 * 4822 * Note that it is deemed acceptable to use this function for error 4823 * prints during probe even if the @err is known to never be -EPROBE_DEFER. 4824 * The benefit compared to a normal dev_err() is the standardized format 4825 * of the error code and the fact that the error code is returned. 4826 * 4827 * Returns @err. 4828 * 4829 */ 4830 int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) 
4831 { 4832 struct va_format vaf; 4833 va_list args; 4834 4835 va_start(args, fmt); 4836 vaf.fmt = fmt; 4837 vaf.va = &args; 4838 4839 if (err != -EPROBE_DEFER) { 4840 dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); 4841 } else { 4842 device_set_deferred_probe_reason(dev, &vaf); 4843 dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); 4844 } 4845 4846 va_end(args); 4847 4848 return err; 4849 } 4850 EXPORT_SYMBOL_GPL(dev_err_probe); 4851 4852 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) 4853 { 4854 return fwnode && !IS_ERR(fwnode->secondary); 4855 } 4856 4857 /** 4858 * set_primary_fwnode - Change the primary firmware node of a given device. 4859 * @dev: Device to handle. 4860 * @fwnode: New primary firmware node of the device. 4861 * 4862 * Set the device's firmware node pointer to @fwnode, but if a secondary 4863 * firmware node of the device is present, preserve it. 4864 * 4865 * Valid fwnode cases are: 4866 * - primary --> secondary --> -ENODEV 4867 * - primary --> NULL 4868 * - secondary --> -ENODEV 4869 * - NULL 4870 */ 4871 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 4872 { 4873 struct device *parent = dev->parent; 4874 struct fwnode_handle *fn = dev->fwnode; 4875 4876 if (fwnode) { 4877 if (fwnode_is_primary(fn)) 4878 fn = fn->secondary; 4879 4880 if (fn) { 4881 WARN_ON(fwnode->secondary); 4882 fwnode->secondary = fn; 4883 } 4884 dev->fwnode = fwnode; 4885 } else { 4886 if (fwnode_is_primary(fn)) { 4887 dev->fwnode = fn->secondary; 4888 /* Set fn->secondary = NULL, so fn remains the primary fwnode */ 4889 if (!(parent && fn == parent->fwnode)) 4890 fn->secondary = NULL; 4891 } else { 4892 dev->fwnode = NULL; 4893 } 4894 } 4895 } 4896 EXPORT_SYMBOL_GPL(set_primary_fwnode); 4897 4898 /** 4899 * set_secondary_fwnode - Change the secondary firmware node of a given device. 4900 * @dev: Device to handle. 4901 * @fwnode: New secondary firmware node of the device. 
4902 * 4903 * If a primary firmware node of the device is present, set its secondary 4904 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to 4905 * @fwnode. 4906 */ 4907 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 4908 { 4909 if (fwnode) 4910 fwnode->secondary = ERR_PTR(-ENODEV); 4911 4912 if (fwnode_is_primary(dev->fwnode)) 4913 dev->fwnode->secondary = fwnode; 4914 else 4915 dev->fwnode = fwnode; 4916 } 4917 EXPORT_SYMBOL_GPL(set_secondary_fwnode); 4918 4919 /** 4920 * device_set_of_node_from_dev - reuse device-tree node of another device 4921 * @dev: device whose device-tree node is being set 4922 * @dev2: device whose device-tree node is being reused 4923 * 4924 * Takes another reference to the new device-tree node after first dropping 4925 * any reference held to the old node. 4926 */ 4927 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) 4928 { 4929 of_node_put(dev->of_node); 4930 dev->of_node = of_node_get(dev2->of_node); 4931 dev->of_node_reused = true; 4932 } 4933 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); 4934 4935 void device_set_node(struct device *dev, struct fwnode_handle *fwnode) 4936 { 4937 dev->fwnode = fwnode; 4938 dev->of_node = to_of_node(fwnode); 4939 } 4940 EXPORT_SYMBOL_GPL(device_set_node); 4941 4942 int device_match_name(struct device *dev, const void *name) 4943 { 4944 return sysfs_streq(dev_name(dev), name); 4945 } 4946 EXPORT_SYMBOL_GPL(device_match_name); 4947 4948 int device_match_of_node(struct device *dev, const void *np) 4949 { 4950 return dev->of_node == np; 4951 } 4952 EXPORT_SYMBOL_GPL(device_match_of_node); 4953 4954 int device_match_fwnode(struct device *dev, const void *fwnode) 4955 { 4956 return dev_fwnode(dev) == fwnode; 4957 } 4958 EXPORT_SYMBOL_GPL(device_match_fwnode); 4959 4960 int device_match_devt(struct device *dev, const void *pdevt) 4961 { 4962 return dev->devt == *(dev_t *)pdevt; 4963 } 4964 
EXPORT_SYMBOL_GPL(device_match_devt); 4965 4966 int device_match_acpi_dev(struct device *dev, const void *adev) 4967 { 4968 return ACPI_COMPANION(dev) == adev; 4969 } 4970 EXPORT_SYMBOL(device_match_acpi_dev); 4971 4972 int device_match_acpi_handle(struct device *dev, const void *handle) 4973 { 4974 return ACPI_HANDLE(dev) == handle; 4975 } 4976 EXPORT_SYMBOL(device_match_acpi_handle); 4977 4978 int device_match_any(struct device *dev, const void *unused) 4979 { 4980 return 1; 4981 } 4982 EXPORT_SYMBOL_GPL(device_match_any); 4983