// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/core.c - core driver model code (device registration, etc)
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2006 Novell, Inc.
 */

#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/swiotlb.h>
#include <linux/sysfs.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */

#include "base.h"
#include "power/power.h"

#ifdef CONFIG_SYSFS_DEPRECATED
#ifdef CONFIG_SYSFS_DEPRECATED_V2
long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
/* Allow "sysfs.deprecated=<0|1>" on the kernel command line to override the
 * CONFIG_SYSFS_DEPRECATED_V2 default chosen above. */
static int __init sysfs_deprecated_setup(char *arg)
{
        return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif

/* Device links support. */
/* Devices whose sync_state() invocation is deferred (see the sync_state
 * pause/resume machinery below). */
static LIST_HEAD(deferred_sync);
/* Starts at 1 so that sync_state() calls stay paused until the matching
 * late_initcall resume (sync_state_resume_initcall()) runs. */
static unsigned int defer_sync_state_count = 1;
/* Protects the fwnode link lists (fwnode->suppliers / fwnode->consumers). */
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
static bool fw_devlink_drv_reg_done;

/**
 * fwnode_link_add - Create a link between two fwnode_handles.
 * @con: Consumer end of the link.
 * @sup: Supplier end of the link.
 *
 * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
 * represents the detail that the firmware lists @sup fwnode as supplying a
 * resource to @con.
 *
 * The driver core will use the fwnode link to create a device link between the
 * two device objects corresponding to @con and @sup when they are created. The
 * driver core will automatically delete the fwnode link between @con and @sup
 * after doing that.
 *
 * Attempts to create duplicate links between the same pair of fwnode handles
 * are ignored and there is no reference counting.
 *
 * Return: 0 on success (including the duplicate case), -ENOMEM on allocation
 * failure.
 */
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
{
        struct fwnode_link *link;
        int ret = 0;

        mutex_lock(&fwnode_link_lock);

        /* Only one link per (con, sup) pair: silently ignore duplicates. */
        list_for_each_entry(link, &sup->consumers, s_hook)
                if (link->consumer == con)
                        goto out;

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link) {
                ret = -ENOMEM;
                goto out;
        }

        link->supplier = sup;
        INIT_LIST_HEAD(&link->s_hook);
        link->consumer = con;
        INIT_LIST_HEAD(&link->c_hook);

        /* Hook the link into both ends' lists. */
        list_add(&link->s_hook, &sup->consumers);
        list_add(&link->c_hook, &con->suppliers);
        pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
                 con, sup);
out:
        mutex_unlock(&fwnode_link_lock);

        return ret;
}

/**
 * __fwnode_link_del - Delete a link between two fwnode_handles.
 * @link: the fwnode_link to be deleted
 *
 * The fwnode_link_lock needs to be held when this function is called.
 */
static void __fwnode_link_del(struct fwnode_link *link)
{
        pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
                 link->consumer, link->supplier);
        /* Unhook from both the supplier's and the consumer's list. */
        list_del(&link->s_hook);
        list_del(&link->c_hook);
        kfree(link);
}

/**
 * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
 * @fwnode: fwnode whose supplier links need to be deleted
 *
 * Deletes all supplier links connecting directly to @fwnode.
 */
static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
{
        struct fwnode_link *link, *tmp;

        mutex_lock(&fwnode_link_lock);
        /* _safe variant: __fwnode_link_del() frees the entry we iterate on. */
        list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
                __fwnode_link_del(link);
        mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
 * @fwnode: fwnode whose consumer links need to be deleted
 *
 * Deletes all consumer links connecting directly to @fwnode.
 */
static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
{
        struct fwnode_link *link, *tmp;

        mutex_lock(&fwnode_link_lock);
        /* _safe variant: __fwnode_link_del() frees the entry we iterate on. */
        list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
                __fwnode_link_del(link);
        mutex_unlock(&fwnode_link_lock);
}

/**
 * fwnode_links_purge - Delete all links connected to a fwnode_handle.
 * @fwnode: fwnode whose links needs to be deleted
 *
 * Deletes all links connecting directly to a fwnode.
 */
void fwnode_links_purge(struct fwnode_handle *fwnode)
{
        fwnode_links_purge_suppliers(fwnode);
        fwnode_links_purge_consumers(fwnode);
}

/*
 * Recursively mark @fwnode and its available descendants that have no struct
 * device attached as FWNODE_FLAG_NOT_DEVICE and drop their consumer links.
 * Nodes that already have a device (fwnode->dev set) are left untouched.
 */
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
        struct fwnode_handle *child;

        /* Don't purge consumer links of an added child */
        if (fwnode->dev)
                return;

        fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
        fwnode_links_purge_consumers(fwnode);

        fwnode_for_each_available_child_node(fwnode, child)
                fw_devlink_purge_absent_suppliers(child);
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);

/*
 * Device-links locking: writers always take a mutex/rwsem; when SRCU is
 * available, readers use SRCU so that link removal can wait for all readers
 * via synchronize_srcu() without blocking them.
 */
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);

static inline void device_links_write_lock(void)
{
        mutex_lock(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
        mutex_unlock(&device_links_lock);
}

int device_links_read_lock(void) __acquires(&device_links_srcu)
{
        return srcu_read_lock(&device_links_srcu);
}

void device_links_read_unlock(int idx) __releases(&device_links_srcu)
{
        srcu_read_unlock(&device_links_srcu, idx);
}

int device_links_read_lock_held(void)
{
        return srcu_read_lock_held(&device_links_srcu);
}

static void device_link_synchronize_removal(void)
{
        synchronize_srcu(&device_links_srcu);
}

static void device_link_remove_from_lists(struct device_link *link)
{
        /* RCU-safe deletion: concurrent SRCU readers may still walk the lists. */
        list_del_rcu(&link->s_node);
        list_del_rcu(&link->c_node);
}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);

static inline void device_links_write_lock(void)
{
        down_write(&device_links_lock);
}

static inline void device_links_write_unlock(void)
{
        up_write(&device_links_lock);
}

int device_links_read_lock(void)
{
        down_read(&device_links_lock);
        return 0;
}

void device_links_read_unlock(int not_used)
{
        up_read(&device_links_lock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int device_links_read_lock_held(void)
{
        return lockdep_is_held(&device_links_lock);
}
#endif

/* No SRCU: readers hold the rwsem, so nothing to synchronize against. */
static inline void device_link_synchronize_removal(void)
{
}

static void device_link_remove_from_lists(struct device_link *link)
{
        list_del(&link->s_node);
        list_del(&link->c_node);
}
#endif /* !CONFIG_SRCU */

/* Return true if @dev is an ancestor (parent, grandparent, ...) of @target. */
static bool device_is_ancestor(struct device *dev, struct device *target)
{
        while (target->parent) {
                target = target->parent;
                if (dev == target)
                        return true;
        }
        return false;
}

/**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
 * @target: Device to check against.
 *
 * Check if @target depends on @dev or any device dependent on it (its child or
 * its consumer etc). Return 1 if that is the case or 0 otherwise.
 */
int device_is_dependent(struct device *dev, void *target)
{
        struct device_link *link;
        int ret;

        /*
         * The "ancestors" check is needed to catch the case when the target
         * device has not been completely initialized yet and it is still
         * missing from the list of children of its parent device.
         */
        if (dev == target || device_is_ancestor(dev, target))
                return 1;

        ret = device_for_each_child(dev, target, device_is_dependent);
        if (ret)
                return ret;

        list_for_each_entry(link, &dev->links.consumers, s_node) {
                /*
                 * Managed SYNC_STATE_ONLY links only affect sync_state()
                 * callbacks, so they do not count as dependencies here.
                 */
                if ((link->flags & ~DL_FLAG_INFERRED) ==
                    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
                        continue;

                if (link->consumer == target)
                        return 1;

                ret = device_is_dependent(link->consumer, target);
                if (ret)
                        break;
        }
        return ret;
}

/*
 * Set the initial state of a managed link from the current probing status of
 * the supplier and the consumer.
 */
static void device_link_init_status(struct device_link *link,
                                    struct device *consumer,
                                    struct device *supplier)
{
        switch (supplier->links.status) {
        case DL_DEV_PROBING:
                switch (consumer->links.status) {
                case DL_DEV_PROBING:
                        /*
                         * A consumer driver can create a link to a supplier
                         * that has not completed its probing yet as long as it
                         * knows that the supplier is already functional (for
                         * example, it has just acquired some resources from the
                         * supplier).
                         */
                        link->status = DL_STATE_CONSUMER_PROBE;
                        break;
                default:
                        link->status = DL_STATE_DORMANT;
                        break;
                }
                break;
        case DL_DEV_DRIVER_BOUND:
                switch (consumer->links.status) {
                case DL_DEV_PROBING:
                        link->status = DL_STATE_CONSUMER_PROBE;
                        break;
                case DL_DEV_DRIVER_BOUND:
                        link->status = DL_STATE_ACTIVE;
                        break;
                default:
                        link->status = DL_STATE_AVAILABLE;
                        break;
                }
                break;
        case DL_DEV_UNBINDING:
                link->status = DL_STATE_SUPPLIER_UNBIND;
                break;
        default:
                link->status = DL_STATE_DORMANT;
                break;
        }
}

/*
 * Move @dev, its children and its (non-SYNC_STATE_ONLY) consumers to the tails
 * of the devices_kset and dpm_list, recursively.
 */
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
        struct device_link *link;

        /*
         * Devices that have not been registered yet will be put to the ends
         * of the lists during the registration, so skip them here.
         */
        if (device_is_registered(dev))
                devices_kset_move_last(dev);

        if (device_pm_initialized(dev))
                device_pm_move_last(dev);

        device_for_each_child(dev, NULL, device_reorder_to_tail);
        list_for_each_entry(link, &dev->links.consumers, s_node) {
                /* SYNC_STATE_ONLY managed links impose no ordering. */
                if ((link->flags & ~DL_FLAG_INFERRED) ==
                    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
                        continue;
                device_reorder_to_tail(link->consumer, NULL);
        }

        return 0;
}

/**
 * device_pm_move_to_tail - Move set of devices to the end of device lists
 * @dev: Device to move
 *
 * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 *
 * It moves the @dev along with all of its children and all of its consumers
 * to the ends of the device_kset and dpm_list, recursively.
 */
void device_pm_move_to_tail(struct device *dev)
{
        int idx;

        idx = device_links_read_lock();
        device_pm_lock();
        device_reorder_to_tail(dev, NULL);
        device_pm_unlock();
        device_links_read_unlock(idx);
}

/* Map the embedded class device back to its struct device_link. */
#define to_devlink(dev) container_of((dev), struct device_link, link_dev)

/* sysfs "status": expose the link state as a human-readable string. */
static ssize_t status_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        const char *output;

        switch (to_devlink(dev)->status) {
        case DL_STATE_NONE:
                output = "not tracked";
                break;
        case DL_STATE_DORMANT:
                output = "dormant";
                break;
        case DL_STATE_AVAILABLE:
                output = "available";
                break;
        case DL_STATE_CONSUMER_PROBE:
                output = "consumer probing";
                break;
        case DL_STATE_ACTIVE:
                output = "active";
                break;
        case DL_STATE_SUPPLIER_UNBIND:
                output = "supplier unbinding";
                break;
        default:
                output = "unknown";
                break;
        }

        return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);

/* sysfs "auto_remove_on": report which unbind event auto-deletes the link. */
static ssize_t auto_remove_on_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct
device_link *link = to_devlink(dev);
        const char *output;

        if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
                output = "supplier unbind";
        else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
                output = "consumer unbind";
        else
                output = "never";

        return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);

/* sysfs "runtime_pm": 1 if the link participates in runtime PM. */
static ssize_t runtime_pm_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct device_link *link = to_devlink(dev);

        return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);

/* sysfs "sync_state_only": 1 if the link only affects sync_state(). */
static ssize_t sync_state_only_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct device_link *link = to_devlink(dev);

        return sysfs_emit(buf, "%d\n",
                          !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);

static struct attribute *devlink_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_auto_remove_on.attr,
        &dev_attr_runtime_pm.attr,
        &dev_attr_sync_state_only.attr,
        NULL,
};
ATTRIBUTE_GROUPS(devlink);

/* Deferred teardown of a device link, run from the "long" workqueue. */
static void device_link_release_fn(struct work_struct *work)
{
        struct device_link *link = container_of(work, struct device_link, rm_work);

        /* Ensure that all references to the link object have been dropped.
 */
        device_link_synchronize_removal();

        /* Drop any runtime-PM references the link still holds on the supplier. */
        while (refcount_dec_not_one(&link->rpm_active))
                pm_runtime_put(link->supplier);

        put_device(link->consumer);
        put_device(link->supplier);
        kfree(link);
}

static void devlink_dev_release(struct device *dev)
{
        struct device_link *link = to_devlink(dev);

        INIT_WORK(&link->rm_work, device_link_release_fn);
        /*
         * It may take a while to complete this work because of the SRCU
         * synchronization in device_link_release_fn() and if the consumer or
         * supplier devices get deleted when it runs, so put it into the "long"
         * workqueue.
         */
        queue_work(system_long_wq, &link->rm_work);
}

static struct class devlink_class = {
        .name = "devlink",
        .owner = THIS_MODULE,
        .dev_groups = devlink_groups,
        .dev_release = devlink_dev_release,
};

static int devlink_add_symlinks(struct device *dev,
                                struct class_interface *class_intf)
{
        int ret;
        size_t len;
        struct device_link *link = to_devlink(dev);
        struct device *sup = link->supplier;
        struct device *con = link->consumer;
        char *buf;

        /*
         * Room for "supplier:<bus>:<name>" or "consumer:<bus>:<name>"
         * (whichever is longer) plus the terminating NUL.
         */
        len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
                  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
        len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
        if (ret)
                goto out;

        ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
        if (ret)
                goto err_con;

        snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_con_dev;

        snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_sup_dev;

        goto out;

        /* Unwind the symlinks created so far, in reverse order. */
err_sup_dev:
        snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
err_con:
        sysfs_remove_link(&link->link_dev.kobj, "supplier");
out:
        kfree(buf);
        return ret;
}

static void devlink_remove_symlinks(struct device *dev,
                                    struct class_interface *class_intf)
{
        struct device_link *link = to_devlink(dev);
        size_t len;
        struct device *sup = link->supplier;
        struct device *con = link->consumer;
        char *buf;

        sysfs_remove_link(&link->link_dev.kobj, "consumer");
        sysfs_remove_link(&link->link_dev.kobj, "supplier");

        /* Same sizing as devlink_add_symlinks(). */
        len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
                  strlen(dev_bus_name(con)) + strlen(dev_name(con)));
        len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf) {
                WARN(1, "Unable to properly free device link symlinks!\n");
                return;
        }

        if (device_is_registered(con)) {
                snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
                sysfs_remove_link(&con->kobj, buf);
        }
        snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
        kfree(buf);
}

static struct class_interface devlink_class_intf = {
        .class = &devlink_class,
        .add_dev = devlink_add_symlinks,
        .remove_dev = devlink_remove_symlinks,
};

static int __init devlink_class_init(void)
{
        int ret;

        ret = class_register(&devlink_class);
        if (ret)
                return ret;

        ret = class_interface_register(&devlink_class_intf);
        if (ret)
                class_unregister(&devlink_class);

        return ret;
}
postcore_initcall(devlink_class_init);

/* Flags that select the driver-core-managed link lifetimes. */
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
                               DL_FLAG_AUTOREMOVE_SUPPLIER | \
                               DL_FLAG_AUTOPROBE_CONSUMER | \
                               DL_FLAG_SYNC_STATE_ONLY | \
                               DL_FLAG_INFERRED)

#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
                            DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)

/**
 * device_link_add - Create a link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 * @flags: Link flags.
 *
 * The caller is responsible for the proper synchronization of the link creation
 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
 * runtime PM framework to take the link into account. Second, if the
 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
 * be forced into the active meta state and reference-counted upon the creation
 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
 * ignored.
 *
 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
 * expected to release the link returned by it directly with the help of either
 * device_link_del() or device_link_remove().
 *
 * If that flag is not set, however, the caller of this function is handing the
 * management of the link over to the driver core entirely and its return value
 * can only be used to check whether or not the link is present. In that case,
 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
 * flags can be used to indicate to the driver core when the link can be safely
 * deleted. Namely, setting one of them in @flags indicates to the driver core
 * that the link is not going to be used (by the given caller of this function)
 * after unbinding the consumer or supplier driver, respectively, from its
 * device, so the link can be deleted at that point. If none of them is set,
 * the link will be maintained until one of the devices pointed to by it (either
 * the consumer or the supplier) is unregistered.
 *
 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
 * be used to request the driver core to automatically probe for a consumer
 * driver after successfully binding a driver to the supplier device.
 *
 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 * the same time is invalid and will cause NULL to be returned upfront.
 * However, if a device link between the given @consumer and @supplier pair
 * exists already when this function is called for them, the existing link will
 * be returned regardless of its current type and status (the link's flags may
 * be modified then). The caller of this function is then expected to treat
 * the link as though it has just been created, so (in particular) if
 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 * explicitly when not needed any more (as stated above).
 *
 * A side effect of the link creation is re-ordering of dpm_list and the
 * devices_kset list by moving the consumer device and all devices depending
 * on it to the ends of these lists (that does not happen to devices that have
 * not been registered when this function is called).
 *
 * The supplier device is required to be registered when this function is called
 * and NULL will be returned if that is not the case. The consumer device need
 * not be registered, however.
 */
struct device_link *device_link_add(struct device *consumer,
                                    struct device *supplier, u32 flags)
{
        struct device_link *link;

        /* Reject invalid or mutually exclusive flag combinations upfront. */
        if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
            (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
            (flags & DL_FLAG_SYNC_STATE_ONLY &&
             (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
            (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
             flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
                      DL_FLAG_AUTOREMOVE_SUPPLIER)))
                return NULL;

        if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
                if (pm_runtime_get_sync(supplier) < 0) {
                        pm_runtime_put_noidle(supplier);
                        return NULL;
                }
        }

        if (!(flags & DL_FLAG_STATELESS))
                flags |= DL_FLAG_MANAGED;

        device_links_write_lock();
        device_pm_lock();

        /*
         * If the supplier has not been fully registered yet or there is a
         * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
         * the supplier already in the graph, return NULL. If the link is a
         * SYNC_STATE_ONLY link, we don't check for reverse dependencies
         * because it only affects sync_state() callbacks.
         */
        if (!device_pm_initialized(supplier)
            || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
                  device_is_dependent(consumer, supplier))) {
                link = NULL;
                goto out;
        }

        /*
         * SYNC_STATE_ONLY links are useless once a consumer device has probed.
         * So, only create it if the consumer hasn't probed yet.
         */
        if (flags & DL_FLAG_SYNC_STATE_ONLY &&
            consumer->links.status != DL_DEV_NO_DRIVER &&
            consumer->links.status != DL_DEV_PROBING) {
                link = NULL;
                goto out;
        }

        /*
         * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
         * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
         * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
         */
        if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
                flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;

        /* Reuse an existing link between this pair if there is one. */
        list_for_each_entry(link, &supplier->links.consumers, s_node) {
                if (link->consumer != consumer)
                        continue;

                /* An explicit request upgrades an inferred link. */
                if (link->flags & DL_FLAG_INFERRED &&
                    !(flags & DL_FLAG_INFERRED))
                        link->flags &= ~DL_FLAG_INFERRED;

                if (flags & DL_FLAG_PM_RUNTIME) {
                        if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
                                pm_runtime_new_link(consumer);
                                link->flags |= DL_FLAG_PM_RUNTIME;
                        }
                        if (flags & DL_FLAG_RPM_ACTIVE)
                                refcount_inc(&link->rpm_active);
                }

                if (flags & DL_FLAG_STATELESS) {
                        kref_get(&link->kref);
                        if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
                            !(link->flags & DL_FLAG_STATELESS)) {
                                link->flags |= DL_FLAG_STATELESS;
                                goto reorder;
                        } else {
                                link->flags |= DL_FLAG_STATELESS;
                                goto out;
                        }
                }

                /*
                 * If the life time of the link following from the new flags is
                 * longer than indicated by the flags of the existing link,
                 * update the existing link to stay around longer.
                 */
                if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
                        if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
                                link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
                                link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
                        }
                } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
                        link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
                                         DL_FLAG_AUTOREMOVE_SUPPLIER);
                }
                if (!(link->flags & DL_FLAG_MANAGED)) {
                        kref_get(&link->kref);
                        link->flags |= DL_FLAG_MANAGED;
                        device_link_init_status(link, consumer, supplier);
                }
                if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
                    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
                        link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
                        goto reorder;
                }

                goto out;
        }

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                goto out;

        refcount_set(&link->rpm_active, 1);

        /* The link holds a reference on each of its two endpoints. */
        get_device(supplier);
        link->supplier = supplier;
        INIT_LIST_HEAD(&link->s_node);
        get_device(consumer);
        link->consumer = consumer;
        INIT_LIST_HEAD(&link->c_node);
        link->flags = flags;
        kref_init(&link->kref);

        link->link_dev.class = &devlink_class;
        device_set_pm_not_required(&link->link_dev);
        dev_set_name(&link->link_dev, "%s:%s--%s:%s",
                     dev_bus_name(supplier), dev_name(supplier),
                     dev_bus_name(consumer), dev_name(consumer));
        if (device_register(&link->link_dev)) {
                put_device(consumer);
                put_device(supplier);
                kfree(link);
                link = NULL;
                goto out;
        }

        if (flags & DL_FLAG_PM_RUNTIME) {
                if (flags & DL_FLAG_RPM_ACTIVE)
                        refcount_inc(&link->rpm_active);

                pm_runtime_new_link(consumer);
        }

        /* Determine the initial link state. */
        if (flags & DL_FLAG_STATELESS)
                link->status = DL_STATE_NONE;
        else
                device_link_init_status(link, consumer, supplier);

        /*
         * Some callers expect the link creation during consumer driver probe to
         * resume the supplier even without DL_FLAG_RPM_ACTIVE.
         */
        if (link->status == DL_STATE_CONSUMER_PROBE &&
            flags & DL_FLAG_PM_RUNTIME)
                pm_runtime_resume(supplier);

        list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
        list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);

        if (flags & DL_FLAG_SYNC_STATE_ONLY) {
                dev_dbg(consumer,
                        "Linked as a sync state only consumer to %s\n",
                        dev_name(supplier));
                goto out;
        }

reorder:
        /*
         * Move the consumer and all of the devices depending on it to the end
         * of dpm_list and the devices_kset list.
         *
         * It is necessary to hold dpm_list locked throughout all that or else
         * we may end up suspending with a wrong ordering of it.
         */
        device_reorder_to_tail(consumer, NULL);

        dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));

out:
        device_pm_unlock();
        device_links_write_unlock();

        /* Balance the pm_runtime_get_sync() done above if no link was made. */
        if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
                pm_runtime_put(supplier);

        return link;
}
EXPORT_SYMBOL_GPL(device_link_add);

/* kref release callback: unhook the link and schedule its final teardown. */
static void __device_link_del(struct kref *kref)
{
        struct device_link *link = container_of(kref, struct device_link, kref);

        dev_dbg(link->consumer, "Dropping the link to %s\n",
                dev_name(link->supplier));

        pm_runtime_drop_link(link);

        device_link_remove_from_lists(link);
        device_unregister(&link->link_dev);
}

static void device_link_put_kref(struct device_link *link)
{
        if (link->flags & DL_FLAG_STATELESS)
                kref_put(&link->kref, __device_link_del);
        else if (!device_is_registered(link->consumer))
                __device_link_del(&link->kref);
        else
                WARN(1, "Unable to drop a managed device link reference\n");
}

/**
 * device_link_del - Delete a stateless link between two devices.
 * @link: Device link to delete.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
If the link was added multiple times, it needs to be deleted as often.
 * Care is required for hotplugged devices: Their links are purged on removal
 * and calling device_link_del() is then no longer allowed.
 */
void device_link_del(struct device_link *link)
{
        device_links_write_lock();
        device_link_put_kref(link);
        device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);

/**
 * device_link_remove - Delete a stateless link between two devices.
 * @consumer: Consumer end of the link.
 * @supplier: Supplier end of the link.
 *
 * The caller must ensure proper synchronization of this function with runtime
 * PM.
 */
void device_link_remove(void *consumer, struct device *supplier)
{
        struct device_link *link;

        if (WARN_ON(consumer == supplier))
                return;

        device_links_write_lock();

        /* Only one link can exist per (consumer, supplier) pair. */
        list_for_each_entry(link, &supplier->links.consumers, s_node) {
                if (link->consumer == consumer) {
                        device_link_put_kref(link);
                        break;
                }
        }

        device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);

/*
 * Roll back @dev's supplier links that are in CONSUMER_PROBE state: back to
 * AVAILABLE if the supplier has a driver bound, otherwise (expected only for
 * SYNC_STATE_ONLY links) back to DORMANT.
 */
static void device_links_missing_supplier(struct device *dev)
{
        struct device_link *link;

        list_for_each_entry(link, &dev->links.suppliers, c_node) {
                if (link->status != DL_STATE_CONSUMER_PROBE)
                        continue;

                if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
                        WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
                } else {
                        WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
                        WRITE_ONCE(link->status, DL_STATE_DORMANT);
                }
        }
}

/**
 * device_links_check_suppliers - Check presence of supplier drivers.
 * @dev: Consumer device.
 *
 * Check links from this device to any suppliers. Walk the list of the device's
 * links to suppliers and see if all of them are available. If not, simply
 * return -EPROBE_DEFER.
 *
 * We need to guarantee that the supplier will not go away after the check has
 * been positive here. It only can go away in __device_release_driver() and
 * that function checks the device's links to consumers. This means we need to
 * mark the link as "consumer probe in progress" to make the supplier removal
 * wait for us to complete (or bad things may happen).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
int device_links_check_suppliers(struct device *dev)
{
        struct device_link *link;
        int ret = 0;
        struct fwnode_handle *sup_fw;

        /*
         * Device waiting for supplier to become available is not allowed to
         * probe.
         */
        mutex_lock(&fwnode_link_lock);
        if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
            !fw_devlink_is_permissive()) {
                sup_fw = list_first_entry(&dev->fwnode->suppliers,
                                          struct fwnode_link,
                                          c_hook)->supplier;
                dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
                              sup_fw);
                mutex_unlock(&fwnode_link_lock);
                return -EPROBE_DEFER;
        }
        mutex_unlock(&fwnode_link_lock);

        device_links_write_lock();

        list_for_each_entry(link, &dev->links.suppliers, c_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                /* SYNC_STATE_ONLY links never gate probing. */
                if (link->status != DL_STATE_AVAILABLE &&
                    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
                        device_links_missing_supplier(dev);
                        dev_err_probe(dev, -EPROBE_DEFER,
                                      "supplier %s not ready\n",
                                      dev_name(link->supplier));
                        ret = -EPROBE_DEFER;
                        break;
                }
                WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
        }
        dev->links.status = DL_DEV_PROBING;

        device_links_write_unlock();
        return ret;
}

/**
 * __device_links_queue_sync_state - Queue a device for sync_state() callback
 * @dev: Device to call sync_state() on
 * @list: List head to queue the @dev on
 *
 * Queues a device for a sync_state()
callback when the device links write lock
 * isn't held. This allows the sync_state() execution flow to use device links
 * APIs. The caller must ensure this function is called with
 * device_links_write_lock() held.
 *
 * This function does a get_device() to make sure the device is not freed while
 * on this list.
 *
 * So the caller must also ensure that device_links_flush_sync_list() is called
 * as soon as the caller releases device_links_write_lock(). This is necessary
 * to make sure the sync_state() is called in a timely fashion and the
 * put_device() is called on this device.
 */
static void __device_links_queue_sync_state(struct device *dev,
                                            struct list_head *list)
{
        struct device_link *link;

        if (!dev_has_sync_state(dev))
                return;
        if (dev->state_synced)
                return;

        /* Only queue once every managed consumer of @dev is fully active. */
        list_for_each_entry(link, &dev->links.consumers, s_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;
                if (link->status != DL_STATE_ACTIVE)
                        return;
        }

        /*
         * Set the flag here to avoid adding the same device to a list more
         * than once. This can happen if new consumers get added to the device
         * and probed before the list is flushed.
         */
        dev->state_synced = true;

        if (WARN_ON(!list_empty(&dev->links.defer_sync)))
                return;

        get_device(dev);
        list_add_tail(&dev->links.defer_sync, list);
}

/**
 * device_links_flush_sync_list - Call sync_state() on a list of devices
 * @list: List of devices to call sync_state() on
 * @dont_lock_dev: Device for which lock is already held by the caller
 *
 * Calls sync_state() on all the devices that have been queued for it. This
 * function is used in conjunction with __device_links_queue_sync_state(). The
 * @dont_lock_dev parameter is useful when this function is called from a
 * context where a device lock is already held.
 */
static void device_links_flush_sync_list(struct list_head *list,
                                         struct device *dont_lock_dev)
{
        struct device *dev, *tmp;

        list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
                list_del_init(&dev->links.defer_sync);

                if (dev != dont_lock_dev)
                        device_lock(dev);

                /* Bus-level sync_state() takes precedence over the driver's. */
                if (dev->bus->sync_state)
                        dev->bus->sync_state(dev);
                else if (dev->driver && dev->driver->sync_state)
                        dev->driver->sync_state(dev);

                if (dev != dont_lock_dev)
                        device_unlock(dev);

                /* Drops the reference taken in __device_links_queue_sync_state(). */
                put_device(dev);
        }
}

/* Increment the pause count so sync_state() calls are deferred. */
void device_links_supplier_sync_state_pause(void)
{
        device_links_write_lock();
        defer_sync_state_count++;
        device_links_write_unlock();
}

/*
 * Decrement the pause count; when it hits zero, queue and flush the
 * sync_state() calls that were deferred while paused.
 */
void device_links_supplier_sync_state_resume(void)
{
        struct device *dev, *tmp;
        LIST_HEAD(sync_list);

        device_links_write_lock();
        if (!defer_sync_state_count) {
                WARN(true, "Unmatched sync_state pause/resume!");
                goto out;
        }
        defer_sync_state_count--;
        if (defer_sync_state_count)
                goto out;

        list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
                /*
                 * Delete from deferred_sync list before queuing it to
                 * sync_list because defer_sync is used for both lists.
                 */
                list_del_init(&dev->links.defer_sync);
                __device_links_queue_sync_state(dev, &sync_list);
        }
out:
        device_links_write_unlock();

        /* Must run outside the write lock; see __device_links_queue_sync_state(). */
        device_links_flush_sync_list(&sync_list, NULL);
}

/*
 * Matches the initial defer_sync_state_count = 1 so that sync_state()
 * callbacks start flowing once late initcalls have run.
 */
static int sync_state_resume_initcall(void)
{
        device_links_supplier_sync_state_resume();
        return 0;
}
late_initcall(sync_state_resume_initcall);

/* Park @sup on the deferred_sync list (write lock held by callers). */
static void __device_links_supplier_defer_sync(struct device *sup)
{
        if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
                list_add_tail(&sup->links.defer_sync, &deferred_sync);
}

/* Strip DL_FLAG_MANAGED from @link and drop the reference it held. */
static void device_link_drop_managed(struct device_link *link)
{
        link->flags &= ~DL_FLAG_MANAGED;
        WRITE_ONCE(link->status, DL_STATE_NONE);
        kref_put(&link->kref, __device_link_del);
}

/*
 * sysfs "waiting_for_supplier" attribute: 1 while the device still has
 * pending fwnode supplier links, 0 otherwise.
 * NOTE(review): dereferences dev->fwnode unconditionally — presumably the
 * attribute only exists on devices with a fwnode; confirm against the
 * device_add() path that creates it.
 */
static ssize_t waiting_for_supplier_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        bool val;

        device_lock(dev);
        val = !list_empty(&dev->fwnode->suppliers);
        device_unlock(dev);
        return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);

/**
 * device_links_force_bind - Prepares device to be force bound
 * @dev: Consumer device.
 *
 * device_bind_driver() force binds a device to a driver without calling any
 * driver probe functions. So the consumer really isn't going to wait for any
 * supplier before it's bound to the driver. We still want the device link
 * states to be sensible when this happens.
 *
 * In preparation for device_bind_driver(), this function goes through each
 * supplier device links and checks if the supplier is bound. If it is, then
 * the device link status is set to CONSUMER_PROBE. Otherwise, the device link
 * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_force_bind(struct device *dev)
{
        struct device_link *link, *ln;

        device_links_write_lock();

        list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                /* Supplier not bound: the link can never be honored, drop it. */
                if (link->status != DL_STATE_AVAILABLE) {
                        device_link_drop_managed(link);
                        continue;
                }
                WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
        }
        dev->links.status = DL_DEV_PROBING;

        device_links_write_unlock();
}

/**
 * device_links_driver_bound - Update device links after probing its driver.
 * @dev: Device to update the links for.
 *
 * The probe has been successful, so update links from this device to any
 * consumers by changing their status to "available".
 *
 * Also change the status of @dev's links to suppliers to "active".
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_bound(struct device *dev)
{
        struct device_link *link, *ln;
        LIST_HEAD(sync_list);

        /*
         * If a device binds successfully, it's expected to have created all
         * the device links it needs to or make new device links as it needs
         * them. So, fw_devlink no longer needs to create device links to any
         * of the device's suppliers.
         *
         * Also, if a child firmware node of this bound device is not added as
         * a device by now, assume it is never going to be added and make sure
         * other devices don't defer probe indefinitely by waiting for such a
         * child device.
         */
        if (dev->fwnode && dev->fwnode->dev == dev) {
                struct fwnode_handle *child;
                fwnode_links_purge_suppliers(dev->fwnode);
                fwnode_for_each_available_child_node(dev->fwnode, child)
                        fw_devlink_purge_absent_suppliers(child);
        }
        device_remove_file(dev, &dev_attr_waiting_for_supplier);

        device_links_write_lock();

        list_for_each_entry(link, &dev->links.consumers, s_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                /*
                 * Links created during consumer probe may be in the "consumer
                 * probe" state to start with if the supplier is still probing
                 * when they are created and they may become "active" if the
                 * consumer probe returns first. Skip them here.
                 */
                if (link->status == DL_STATE_CONSUMER_PROBE ||
                    link->status == DL_STATE_ACTIVE)
                        continue;

                WARN_ON(link->status != DL_STATE_DORMANT);
                WRITE_ONCE(link->status, DL_STATE_AVAILABLE);

                /* Re-trigger consumers that asked to be probed once we bound. */
                if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
                        driver_deferred_probe_add(link->consumer);
        }

        if (defer_sync_state_count)
                __device_links_supplier_defer_sync(dev);
        else
                __device_links_queue_sync_state(dev, &sync_list);

        list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
                struct device *supplier;

                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                supplier = link->supplier;
                if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
                        /*
                         * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
                         * other DL_MANAGED_LINK_FLAGS have been set. So, it's
                         * safe to drop the managed link completely.
                         */
                        device_link_drop_managed(link);
                } else {
                        WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
                        WRITE_ONCE(link->status, DL_STATE_ACTIVE);
                }

                /*
                 * This needs to be done even for the deleted
                 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
                 * device link that was preventing the supplier from getting a
                 * sync_state() call.
                 */
                if (defer_sync_state_count)
                        __device_links_supplier_defer_sync(supplier);
                else
                        __device_links_queue_sync_state(supplier, &sync_list);
        }

        dev->links.status = DL_DEV_DRIVER_BOUND;

        device_links_write_unlock();

        device_links_flush_sync_list(&sync_list, dev);
}

/**
 * __device_links_no_driver - Update links of a device without a driver.
 * @dev: Device without a driver.
 *
 * Delete all non-persistent links from this device to any suppliers.
 *
 * Persistent links stay around, but their status is changed to "available",
 * unless they already are in the "supplier unbind in progress" state in which
 * case they need not be updated.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
static void __device_links_no_driver(struct device *dev)
{
        struct device_link *link, *ln;

        list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
                        device_link_drop_managed(link);
                        continue;
                }

                if (link->status != DL_STATE_CONSUMER_PROBE &&
                    link->status != DL_STATE_ACTIVE)
                        continue;

                if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
                        WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
                } else {
                        /* Only SYNC_STATE_ONLY links may reach probe/active
                         * states while their supplier is still unbound. */
                        WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
                        WRITE_ONCE(link->status, DL_STATE_DORMANT);
                }
        }

        dev->links.status = DL_DEV_NO_DRIVER;
}

/**
 * device_links_no_driver - Update links after failing driver probe.
 * @dev: Device whose driver has just failed to probe.
 *
 * Clean up leftover links to consumers for @dev and invoke
 * %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_no_driver(struct device *dev)
{
        struct device_link *link;

        device_links_write_lock();

        list_for_each_entry(link, &dev->links.consumers, s_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                /*
                 * The probe has failed, so if the status of the link is
                 * "consumer probe" or "active", it must have been added by
                 * a probing consumer while this device was still probing.
                 * Change its state to "dormant", as it represents a valid
                 * relationship, but it is not functionally meaningful.
                 */
                if (link->status == DL_STATE_CONSUMER_PROBE ||
                    link->status == DL_STATE_ACTIVE)
                        WRITE_ONCE(link->status, DL_STATE_DORMANT);
        }

        __device_links_no_driver(dev);

        device_links_write_unlock();
}

/**
 * device_links_driver_cleanup - Update links after driver removal.
 * @dev: Device whose driver has just gone away.
 *
 * Update links to consumers for @dev by changing their status to "dormant" and
 * invoke %__device_links_no_driver() to update links to suppliers for it as
 * appropriate.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_driver_cleanup(struct device *dev)
{
        struct device_link *link, *ln;

        device_links_write_lock();

        list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;

                WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
                WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);

                /*
                 * autoremove the links between this @dev and its consumer
                 * devices that are not active, i.e. where the link state
                 * has moved to DL_STATE_SUPPLIER_UNBIND.
                 */
                if (link->status == DL_STATE_SUPPLIER_UNBIND &&
                    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
                        device_link_drop_managed(link);

                WRITE_ONCE(link->status, DL_STATE_DORMANT);
        }

        list_del_init(&dev->links.defer_sync);
        __device_links_no_driver(dev);

        device_links_write_unlock();
}

/**
 * device_links_busy - Check if there are any busy links to consumers.
 * @dev: Device to check.
 *
 * Check each consumer of the device and return 'true' if its link's status
 * is one of "consumer probe" or "active" (meaning that the given consumer is
 * probing right now or its driver is present).
Otherwise, change the link
 * state to "supplier unbind" to prevent the consumer from being probed
 * successfully going forward.
 *
 * Return 'false' if there are no probing or active consumers.
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
bool device_links_busy(struct device *dev)
{
        struct device_link *ln;
        bool busy = false;

        device_links_write_lock();

        /*
         * Walk the managed consumer links: bail out as soon as one is found
         * probing or active; otherwise flip each link to "supplier unbind"
         * so its consumer cannot probe successfully from here on.
         */
        list_for_each_entry(ln, &dev->links.consumers, s_node) {
                enum device_link_state st;

                if (!(ln->flags & DL_FLAG_MANAGED))
                        continue;

                st = ln->status;
                if (st == DL_STATE_CONSUMER_PROBE || st == DL_STATE_ACTIVE) {
                        busy = true;
                        break;
                }
                WRITE_ONCE(ln->status, DL_STATE_SUPPLIER_UNBIND);
        }

        dev->links.status = DL_DEV_UNBINDING;

        device_links_write_unlock();
        return busy;
}

/**
 * device_links_unbind_consumers - Force unbind consumers of the given device.
 * @dev: Device to unbind the consumers of.
 *
 * Walk the list of links to consumers for @dev and if any of them is in the
 * "consumer probe" state, wait for all device probes in progress to complete
 * and start over.
 *
 * If that's not the case, change the status of the link to "supplier unbind"
 * and check if the link was in the "active" state. If so, force the consumer
 * driver to unbind and start over (the consumer will not re-probe as we have
 * changed the state of the link already).
 *
 * Links without the DL_FLAG_MANAGED flag set are ignored.
 */
void device_links_unbind_consumers(struct device *dev)
{
        struct device_link *link;

 start:
        device_links_write_lock();

        list_for_each_entry(link, &dev->links.consumers, s_node) {
                enum device_link_state status;

                /* SYNC_STATE_ONLY links never block unbinding. */
                if (!(link->flags & DL_FLAG_MANAGED) ||
                    link->flags & DL_FLAG_SYNC_STATE_ONLY)
                        continue;

                status = link->status;
                if (status == DL_STATE_CONSUMER_PROBE) {
                        /* Can't unbind mid-probe: wait and rescan the list. */
                        device_links_write_unlock();

                        wait_for_device_probe();
                        goto start;
                }
                WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
                if (status == DL_STATE_ACTIVE) {
                        struct device *consumer = link->consumer;

                        /* Pin the consumer across the unlocked unbind. */
                        get_device(consumer);

                        device_links_write_unlock();

                        device_release_driver_internal(consumer, NULL,
                                                       consumer->parent);
                        put_device(consumer);
                        goto start;
                }
        }

        device_links_write_unlock();
}

/**
 * device_links_purge - Delete existing links to other devices.
 * @dev: Target device.
 */
static void device_links_purge(struct device *dev)
{
        struct device_link *link, *ln;

        /* The devlink class devices are the links themselves; skip them. */
        if (dev->class == &devlink_class)
                return;

        /*
         * Delete all of the remaining links from this device to any other
         * devices (either consumers or suppliers).
         */
        device_links_write_lock();

        list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
                WARN_ON(link->status == DL_STATE_ACTIVE);
                __device_link_del(&link->kref);
        }

        list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
                WARN_ON(link->status != DL_STATE_DORMANT &&
                        link->status != DL_STATE_NONE);
                __device_link_del(&link->kref);
        }

        device_links_write_unlock();
}

/* Flag sets used for the fw_devlink= kernel command line modes. */
#define FW_DEVLINK_FLAGS_PERMISSIVE     (DL_FLAG_INFERRED | \
                                         DL_FLAG_SYNC_STATE_ONLY)
#define FW_DEVLINK_FLAGS_ON             (DL_FLAG_INFERRED | \
                                         DL_FLAG_AUTOPROBE_CONSUMER)
#define FW_DEVLINK_FLAGS_RPM            (FW_DEVLINK_FLAGS_ON | \
                                         DL_FLAG_PM_RUNTIME)

static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
/* fw_devlink=off|permissive|on|rpm; unrecognized values leave the default. */
static int __init fw_devlink_setup(char *arg)
{
        if (!arg)
                return -EINVAL;

        if (strcmp(arg, "off") == 0) {
                fw_devlink_flags = 0;
        } else if (strcmp(arg, "permissive") == 0) {
                fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
        } else if (strcmp(arg, "on") == 0) {
                fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
        } else if (strcmp(arg, "rpm") == 0) {
                fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
        }
        return 0;
}
early_param("fw_devlink", fw_devlink_setup);

static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
        return strtobool(arg, &fw_devlink_strict);
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);

u32 fw_devlink_get_flags(void)
{
        return fw_devlink_flags;
}

static bool fw_devlink_is_permissive(void)
{
        return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
}

bool fw_devlink_is_strict(void)
{
        return fw_devlink_strict && !fw_devlink_is_permissive();
}

/* Ask the fwnode to add its links once; marked with FWNODE_FLAG_LINKS_ADDED. */
static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
{
        if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
                return;

        fwnode_call_int_op(fwnode, add_links);
        fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
}

/* Depth-first parse of @fwnode and all its available descendants. */
static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
{
        struct fwnode_handle *child = NULL;

        fw_devlink_parse_fwnode(fwnode);

        while ((child = fwnode_get_next_available_child_node(fwnode, child)))
                fw_devlink_parse_fwtree(child);
}

/* Downgrade an inferred link to the permissive (SYNC_STATE_ONLY) flag set. */
static void fw_devlink_relax_link(struct device_link *link)
{
        if (!(link->flags & DL_FLAG_INFERRED))
                return;

        if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
                return;

        pm_runtime_drop_link(link);
        link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
        dev_dbg(link->consumer, "Relaxing link with %s\n",
                dev_name(link->supplier));
}

/* class_for_each_device() callback: relax links whose supplier has no driver. */
static int fw_devlink_no_driver(struct device *dev, void *data)
{
        struct device_link *link = to_devlink(dev);

        if (!link->supplier->can_match)
                fw_devlink_relax_link(link);

        return 0;
}

void fw_devlink_drivers_done(void)
{
        fw_devlink_drv_reg_done = true;
        device_links_write_lock();
        class_for_each_device(&devlink_class, NULL, NULL,
                              fw_devlink_no_driver);
        device_links_write_unlock();
}

/* Relax all consumer links of @dev so they stop blocking consumer probes. */
static void fw_devlink_unblock_consumers(struct device *dev)
{
        struct device_link *link;

        if (!fw_devlink_flags || fw_devlink_is_permissive())
                return;

        device_links_write_lock();
        list_for_each_entry(link, &dev->links.consumers, s_node)
                fw_devlink_relax_link(link);
        device_links_write_unlock();
}

/**
 * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
 * @con: Device to check dependencies for.
 * @sup: Device to check against.
 *
 * Check if @sup depends on @con or any device dependent on it (its child or
 * its consumer etc).
When such a cyclic dependency is found, convert all
 * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
 * This is the equivalent of doing fw_devlink=permissive just between the
 * devices in the cycle. We need to do this because, at this point, fw_devlink
 * can't tell which of these dependencies is not a real dependency.
 *
 * Return 1 if a cycle is found. Otherwise, return 0.
 */
static int fw_devlink_relax_cycle(struct device *con, void *sup)
{
        struct device_link *link;
        int ret;

        /* Direct hit: @con itself is the supplier we are looking for. */
        if (con == sup)
                return 1;

        /* Recurse into children of @con first. */
        ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
        if (ret)
                return ret;

        list_for_each_entry(link, &con->links.consumers, s_node) {
                /* Already fully relaxed (SYNC_STATE_ONLY + MANAGED); skip. */
                if ((link->flags & ~DL_FLAG_INFERRED) ==
                    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
                        continue;

                if (!fw_devlink_relax_cycle(link->consumer, sup))
                        continue;

                ret = 1;

                /* This link is part of the cycle: relax it. */
                fw_devlink_relax_link(link);
        }
        return ret;
}

/**
 * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
 * @con: consumer device for the device link
 * @sup_handle: fwnode handle of supplier
 * @flags: devlink flags
 *
 * This function will try to create a device link between the consumer device
 * @con and the supplier device represented by @sup_handle.
 *
 * The supplier has to be provided as a fwnode because incorrect cycles in
 * fwnode links can sometimes cause the supplier device to never be created.
 * This function detects such cases and returns an error if it cannot create a
 * device link from the consumer to a missing supplier.
 *
 * Returns,
 * 0 on successfully creating a device link
 * -EINVAL if the device link cannot be created as expected
 * -EAGAIN if the device link cannot be created right now, but it may be
 *  possible to do that in the future
 */
static int fw_devlink_create_devlink(struct device *con,
                                     struct fwnode_handle *sup_handle, u32 flags)
{
        struct device *sup_dev;
        int ret = 0;

        /*
         * In some cases, a device P might also be a supplier to its child node
         * C. However, this would defer the probe of C until the probe of P
         * completes successfully. This is perfectly fine in the device driver
         * model. device_add() doesn't guarantee probe completion of the device
         * by the time it returns.
         *
         * However, there are a few drivers that assume C will finish probing
         * as soon as it's added and before P finishes probing. So, we provide
         * a flag to let fw_devlink know not to delay the probe of C until the
         * probe of P completes successfully.
         *
         * When such a flag is set, we can't create device links where P is the
         * supplier of C as that would delay the probe of C.
         */
        if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
            fwnode_is_ancestor_of(sup_handle, con->fwnode))
                return -EINVAL;

        sup_dev = get_dev_from_fwnode(sup_handle);
        if (sup_dev) {
                /*
                 * If it's one of those drivers that don't actually bind to
                 * their device using driver core, then don't wait on this
                 * supplier device indefinitely.
                 */
                if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
                    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
                        ret = -EINVAL;
                        goto out;
                }

                /*
                 * If this fails, it is due to cycles in device links. Just
                 * give up on this link and treat it as invalid.
                 */
                if (!device_link_add(con, sup_dev, flags) &&
                    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
                        dev_info(con, "Fixing up cyclic dependency with %s\n",
                                 dev_name(sup_dev));
                        device_links_write_lock();
                        fw_devlink_relax_cycle(con, sup_dev);
                        device_links_write_unlock();
                        /* Retry with the relaxed flag set after breaking the cycle. */
                        device_link_add(con, sup_dev,
                                        FW_DEVLINK_FLAGS_PERMISSIVE);
                        ret = -EINVAL;
                }

                goto out;
        }

        /* Supplier that's already initialized without a struct device. */
        if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
                return -EINVAL;

        /*
         * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
         * cycles. So cycle detection isn't necessary and shouldn't be
         * done.
         */
        if (flags & DL_FLAG_SYNC_STATE_ONLY)
                return -EAGAIN;

        /*
         * If we can't find the supplier device from its fwnode, it might be
         * due to a cyclic dependency between fwnodes. Some of these cycles can
         * be broken by applying logic. Check for these types of cycles and
         * break them so that devices in the cycle probe properly.
         *
         * If the supplier's parent is dependent on the consumer, then the
         * consumer and supplier have a cyclic dependency. Since fw_devlink
         * can't tell which of the inferred dependencies are incorrect, don't
         * enforce probe ordering between any of the devices in this cyclic
         * dependency. Do this by relaxing all the fw_devlink device links in
         * this cycle and by treating the fwnode link between the consumer and
         * the supplier as an invalid dependency.
         */
        sup_dev = fwnode_get_next_parent_dev(sup_handle);
        if (sup_dev && device_is_dependent(con, sup_dev)) {
                dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
                         sup_handle, dev_name(sup_dev));
                device_links_write_lock();
                fw_devlink_relax_cycle(con, sup_dev);
                device_links_write_unlock();
                ret = -EINVAL;
        } else {
                /*
                 * Can't check for cycles or no cycles. So let's try
                 * again later.
                 */
                ret = -EAGAIN;
        }

 out:
        /* put_device() tolerates NULL, so no check is needed here. */
        put_device(sup_dev);
        return ret;
}

/**
 * __fw_devlink_link_to_consumers - Create device links to consumers of a device
 * @dev: Device that needs to be linked to its consumers
 *
 * This function looks at all the consumer fwnodes of @dev and creates device
 * links between the consumer device and @dev (supplier).
 *
 * If the consumer device has not been added yet, then this function creates a
 * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
 * of the consumer fwnode. This is necessary to make sure @dev doesn't get a
 * sync_state() callback before the real consumer device gets to be added and
 * then probed.
 *
 * Once device links are created from the real consumer to @dev (supplier), the
 * fwnode links are deleted.
 */
static void __fw_devlink_link_to_consumers(struct device *dev)
{
        struct fwnode_handle *fwnode = dev->fwnode;
        struct fwnode_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
                u32 dl_flags = fw_devlink_get_flags();
                struct device *con_dev;
                bool own_link = true;
                int ret;

                con_dev = get_dev_from_fwnode(link->consumer);
                /*
                 * If consumer device is not available yet, make a "proxy"
                 * SYNC_STATE_ONLY link from the consumer's parent device to
                 * the supplier device. This is necessary to make sure the
                 * supplier doesn't get a sync_state() callback before the real
                 * consumer can create a device link to the supplier.
                 *
                 * This proxy link step is needed to handle the case where the
                 * consumer's parent device is added before the supplier.
                 */
                if (!con_dev) {
                        con_dev = fwnode_get_next_parent_dev(link->consumer);
                        /*
                         * However, if the consumer's parent device is also the
                         * parent of the supplier, don't create a
                         * consumer-supplier link from the parent to its child
                         * device. Such a dependency is impossible.
                         */
                        if (con_dev &&
                            fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
                                put_device(con_dev);
                                con_dev = NULL;
                        } else {
                                own_link = false;
                                dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
                        }
                }

                if (!con_dev)
                        continue;

                ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
                put_device(con_dev);
                /* Keep the fwnode link for proxy links and retryable failures. */
                if (!own_link || ret == -EAGAIN)
                        continue;

                __fwnode_link_del(link);
        }
}

/**
 * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
 * @dev: The consumer device that needs to be linked to its suppliers
 * @fwnode: Root of the fwnode tree that is used to create device links
 *
 * This function looks at all the supplier fwnodes of fwnode tree rooted at
 * @fwnode and creates device links between @dev (consumer) and all the
 * supplier devices of the entire fwnode tree at @fwnode.
 *
 * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
 * and the real suppliers of @dev. Once these device links are created, the
 * fwnode links are deleted. When such device links are successfully created,
 * this function is called recursively on those supplier devices. This is
 * needed to detect and break some invalid cycles in fwnode links.
See
 * fw_devlink_create_devlink() for more details.
 *
 * In addition, it also looks at all the suppliers of the entire fwnode tree
 * because some of the child devices of @dev that have not been added yet
 * (because @dev hasn't probed) might already have their suppliers added to
 * driver core. So, this function creates SYNC_STATE_ONLY device links between
 * @dev (consumer) and these suppliers to make sure they don't execute their
 * sync_state() callbacks before these child devices have a chance to create
 * their device links. The fwnode links that correspond to the child devices
 * aren't delete because they are needed later to create the device links
 * between the real consumer and supplier devices.
 */
static void __fw_devlink_link_to_suppliers(struct device *dev,
                                           struct fwnode_handle *fwnode)
{
        bool own_link = (dev->fwnode == fwnode);
        struct fwnode_link *link, *tmp;
        struct fwnode_handle *child = NULL;
        u32 dl_flags;

        /* Proxy links for descendants are always permissive. */
        if (own_link)
                dl_flags = fw_devlink_get_flags();
        else
                dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;

        list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
                int ret;
                struct device *sup_dev;
                struct fwnode_handle *sup = link->supplier;

                ret = fw_devlink_create_devlink(dev, sup, dl_flags);
                if (!own_link || ret == -EAGAIN)
                        continue;

                __fwnode_link_del(link);

                /* If no device link was created, nothing more to do. */
                if (ret)
                        continue;

                /*
                 * If a device link was successfully created to a supplier, we
                 * now need to try and link the supplier to all its suppliers.
                 *
                 * This is needed to detect and delete false dependencies in
                 * fwnode links that haven't been converted to a device link
                 * yet. See comments in fw_devlink_create_devlink() for more
                 * details on the false dependency.
                 *
                 * Without deleting these false dependencies, some devices will
                 * never probe because they'll keep waiting for their false
                 * dependency fwnode links to be converted to device links.
                 */
                sup_dev = get_dev_from_fwnode(sup);
                __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
                put_device(sup_dev);
        }

        /*
         * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
         * all the descendants. This proxy link step is needed to handle the
         * case where the supplier is added before the consumer's parent device
         * (@dev).
         */
        while ((child = fwnode_get_next_available_child_node(fwnode, child)))
                __fw_devlink_link_to_suppliers(dev, child);
}

/* Entry point: wire up fwnode-described links when @dev is added. */
static void fw_devlink_link_device(struct device *dev)
{
        struct fwnode_handle *fwnode = dev->fwnode;

        if (!fw_devlink_flags)
                return;

        fw_devlink_parse_fwtree(fwnode);

        mutex_lock(&fwnode_link_lock);
        __fw_devlink_link_to_consumers(dev);
        __fw_devlink_link_to_suppliers(dev, fwnode);
        mutex_unlock(&fwnode_link_lock);
}

/* Device links support end. */

/* Legacy platform notification hooks, invoked on device add/remove. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;

static DEFINE_MUTEX(device_hotplug_lock);

void lock_device_hotplug(void)
{
        mutex_lock(&device_hotplug_lock);
}

void unlock_device_hotplug(void)
{
        mutex_unlock(&device_hotplug_lock);
}

/*
 * Non-blocking variant for sysfs handlers: if the lock is contended,
 * back off and restart the syscall instead of sleeping on the mutex.
 */
int lock_device_hotplug_sysfs(void)
{
        if (mutex_trylock(&device_hotplug_lock))
                return 0;

        /* Avoid busy looping (5 ms of sleep should do.
 */
        msleep(5);
        return restart_syscall();
}

#ifdef CONFIG_BLOCK
/* True unless @dev is a block-device partition. */
static inline int device_is_not_partition(struct device *dev)
{
        return !(dev->type == &part_type);
}
#else
static inline int device_is_not_partition(struct device *dev)
{
        return 1;
}
#endif

/* Notify firmware frameworks and the legacy platform hook of a new device. */
static void device_platform_notify(struct device *dev)
{
        acpi_device_notify(dev);

        software_node_notify(dev);

        if (platform_notify)
                platform_notify(dev);
}

/* Mirror of device_platform_notify() for device removal. */
static void device_platform_notify_remove(struct device *dev)
{
        acpi_device_notify_remove(dev);

        software_node_notify_remove(dev);

        if (platform_notify_remove)
                platform_notify_remove(dev);
}

/**
 * dev_driver_string - Return a device's driver name, if at all possible
 * @dev: struct device to get the name of
 *
 * Will return the device's driver's name if it is bound to a device. If
 * the device is not bound to a driver, it will return the name of the bus
 * it is attached to. If it is not attached to a bus either, an empty
 * string will be returned.
 */
const char *dev_driver_string(const struct device *dev)
{
        struct device_driver *drv;

        /* dev->driver can change to NULL underneath us because of unbinding,
         * so be careful about accessing it. dev->bus and dev->class should
         * never change once they are set, so they don't need special care.
         */
        drv = READ_ONCE(dev->driver);
        return drv ?
		drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/*
 * sysfs ->show dispatcher for devices: forward to the device_attribute's
 * show() callback, or fail with -EIO when the attribute has none.
 * Complain loudly about callbacks that overrun the sysfs PAGE_SIZE buffer.
 */
static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->show)
		ret = dev_attr->show(dev, dev_attr, buf);
	if (ret >= (ssize_t)PAGE_SIZE) {
		printk("dev_attr_show: %pS returned bad count\n",
				dev_attr->show);
	}
	return ret;
}

/*
 * sysfs ->store dispatcher for devices: forward to the device_attribute's
 * store() callback, or fail with -EIO when the attribute has none.
 */
static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	ssize_t ret = -EIO;

	if (dev_attr->store)
		ret = dev_attr->store(dev, dev_attr, buf, count);
	return ret;
}

static const struct sysfs_ops dev_sysfs_ops = {
	.show	= dev_attr_show,
	.store	= dev_attr_store,
};

#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)

/*
 * Generic store() helper for a dev_ext_attribute whose ->var points to an
 * unsigned long.  Accepts any base recognised by kstrtoul (0 = auto).
 */
ssize_t device_store_ulong(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	unsigned long new;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;
	*(unsigned long *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_ulong);

/* Generic show() helper: emit the unsigned long behind ->var in hex. */
ssize_t device_show_ulong(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);

ssize_t
device_store_int(struct device *dev,
		 struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);
	int ret;
	long new;

	ret = kstrtol(buf, 0, &new);
	if (ret)
		return ret;

	/* Parse as long first so an out-of-range int can be rejected. */
	if (new > INT_MAX || new < INT_MIN)
		return -EINVAL;
	*(int *)(ea->var) = new;
	/* Always return full write size even if we didn't consume all */
	return size;
}
EXPORT_SYMBOL_GPL(device_store_int);

/* Generic show() helper: emit the int behind ->var in decimal. */
ssize_t device_show_int(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);

/* Generic store() helper for a bool ->var; accepts 0/1/y/n/on/off etc. */
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	if (strtobool(buf, ea->var) < 0)
		return -EINVAL;

	return size;
}
EXPORT_SYMBOL_GPL(device_store_bool);

/* Generic show() helper: emit the bool behind ->var as 0 or 1. */
ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dev_ext_attribute *ea = to_ext_attr(attr);

	return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);

/**
 * device_release - free device structure.
 * @kobj: device's kobject.
 *
 * This is called once the reference count for the object
 * reaches 0. We forward the call to the device's release
 * method, which should handle actually freeing the structure.
 */
static void device_release(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	struct device_private *p = dev->p;

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 *
	 * Drivers still can add resources into device after device
	 * is deleted but alive, so release devres here to avoid
	 * possible memory leak.
	 */
	devres_release_all(dev);

	kfree(dev->dma_range_map);

	/* Prefer the device's own release, then type's, then class's. */
	if (dev->release)
		dev->release(dev);
	else if (dev->type && dev->type->release)
		dev->type->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
	else
		WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
			dev_name(dev));
	kfree(p);
}

/*
 * Return the sysfs namespace tag for a device, as provided by its class's
 * namespace() callback when the class declares an ns_type; NULL otherwise.
 */
static const void *device_namespace(struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

/* Let the device's class override the default sysfs uid/gid, if it wants. */
static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->class && dev->class->get_ownership)
		dev->class->get_ownership(dev, uid, gid);
}

static struct kobj_type device_ktype = {
	.release	= device_release,
	.sysfs_ops	= &dev_sysfs_ops,
	.namespace	= device_namespace,
	.get_ownership	= device_get_ownership,
};


/*
 * Only emit uevents for kobjects that really are devices (device_ktype)
 * and that belong to a bus or a class; everything else is filtered out.
 */
static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &device_ktype) {
		struct device *dev = kobj_to_dev(kobj);
		if (dev->bus)
			return 1;
		if (dev->class)
			return 1;
	}
	return 0;
}

/* Subsystem name reported in the uevent: bus name first, else class name. */
static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
{
	struct device *dev = kobj_to_dev(kobj);

	if (dev->bus)
		return dev->bus->name;
	if (dev->class)
		return dev->class->name;
	return NULL;
}

static int
dev_uevent(struct kset *kset, struct kobject *kobj,
	   struct kobj_uevent_env *env)
{
	struct device *dev = kobj_to_dev(kobj);
	int retval = 0;

	/* add device node properties if present */
	if (MAJOR(dev->devt)) {
		const char *tmp;
		const char *name;
		umode_t mode = 0;
		kuid_t uid = GLOBAL_ROOT_UID;
		kgid_t gid = GLOBAL_ROOT_GID;

		add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
		add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
		name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
		if (name) {
			add_uevent_var(env, "DEVNAME=%s", name);
			/* Only report non-default mode/ownership. */
			if (mode)
				add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
			if (!uid_eq(uid, GLOBAL_ROOT_UID))
				add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
			if (!gid_eq(gid, GLOBAL_ROOT_GID))
				add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
			kfree(tmp);
		}
	}

	if (dev->type && dev->type->name)
		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

	if (dev->driver)
		add_uevent_var(env, "DRIVER=%s", dev->driver->name);

	/* Add common DT information about the device */
	of_device_uevent(dev, env);

	/* have the bus specific function add its stuff */
	if (dev->bus && dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};

/*
 * "uevent" attribute read: synthesize the environment that a uevent for
 * this device would carry and print it, one KEY=value pair per line.
 */
static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	int len = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
	kfree(env);
	return len;
}

/*
 * "uevent" attribute write: let user space trigger a synthetic uevent
 * (e.g. "add", "change") for this device.
 */
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&dev->kobj, buf, count);

	if (rc) {
		dev_err(dev, "uevent: failed to send synthetic uevent\n");
		return rc;
	}

	return count;
}
static DEVICE_ATTR_RW(uevent);

/* "online" attribute read: 1 unless the device has been taken offline. */
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	bool val;

	device_lock(dev);
	val =
	      !dev->offline;
	device_unlock(dev);
	return sysfs_emit(buf, "%u\n", val);
}

/*
 * "online" attribute write: bring the device online (1) or take it
 * offline (0), serialized against other hotplug operations.
 */
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	ret = val ? device_online(dev) : device_offline(dev);
	unlock_device_hotplug();
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(online);

/* "removable" attribute: report dev->removable as a human-readable string. */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	const char *loc;

	switch (dev->removable) {
	case DEVICE_REMOVABLE:
		loc = "removable";
		break;
	case DEVICE_FIXED:
		loc = "fixed";
		break;
	default:
		loc = "unknown";
	}
	return sysfs_emit(buf, "%s\n", loc);
}
static DEVICE_ATTR_RO(removable);

/* Create a NULL-terminated list of attribute groups under the device. */
int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
	return sysfs_create_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_add_groups);

/* Remove groups previously created by device_add_groups(). */
void device_remove_groups(struct device *dev,
			  const struct attribute_group **groups)
{
	sysfs_remove_groups(&dev->kobj, groups);
}
EXPORT_SYMBOL_GPL(device_remove_groups);

/*
 * devres payload for managed attribute groups: stores either a single
 * group or a NULL-terminated list, depending on which release function
 * (devm_attr_group_remove/devm_attr_groups_remove) owns it.
 */
union device_attr_group_devres {
	const struct attribute_group *group;
	const struct attribute_group **groups;
};

/* devres match callback: compare the stored group pointer with @data. */
static int devm_attr_group_match(struct device *dev, void *res, void *data)
{
	return ((union device_attr_group_devres *)res)->group == data;
}

/* devres release callback for a single managed attribute group. */
static void devm_attr_group_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group *group = devres->group;

	dev_dbg(dev, "%s: removing group %p\n", __func__, group);
	sysfs_remove_group(&dev->kobj, group);
}

/* devres release callback for a managed list of attribute groups. */
static void devm_attr_groups_remove(struct device *dev, void *res)
{
	union device_attr_group_devres *devres = res;
	const struct attribute_group **groups = devres->groups;

	dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
	sysfs_remove_groups(&dev->kobj, groups);
}

/**
 * devm_device_add_group - given a device, create a managed attribute group
 * @dev:	The device to create the group for
 * @grp:	The attribute group to create
 *
 * This function creates a group for the first time.  It will explicitly
 * warn and error if any of the attribute files being created already exist.
 *
 * Returns 0 on success or error code on failure.
 */
int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_group_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_group(&dev->kobj, grp);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->group = grp;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_group);

/**
 * devm_device_remove_group: remove a managed group from a device
 * @dev:	device to remove the group from
 * @grp:	group to remove
 *
 * This function removes a group of attributes from a device. The attributes
 * previously have to have been created for this group, otherwise it will fail.
 */
void devm_device_remove_group(struct device *dev,
			      const struct attribute_group *grp)
{
	WARN_ON(devres_release(dev, devm_attr_group_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)grp));
}
EXPORT_SYMBOL_GPL(devm_device_remove_group);

/**
 * devm_device_add_groups - create a bunch of managed attribute groups
 * @dev:	The device to create the group for
 * @groups:	The attribute groups to create, NULL terminated
 *
 * This function creates a bunch of managed attribute groups.  If an error
 * occurs when creating a group, all previously created groups will be
 * removed, unwinding everything back to the original state when this
 * function was called.  It will explicitly warn and error if any of the
 * attribute files being created already exist.
 *
 * Returns 0 on success or error code from sysfs_create_group on failure.
 */
int devm_device_add_groups(struct device *dev,
			   const struct attribute_group **groups)
{
	union device_attr_group_devres *devres;
	int error;

	devres = devres_alloc(devm_attr_groups_remove,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	error = sysfs_create_groups(&dev->kobj, groups);
	if (error) {
		devres_free(devres);
		return error;
	}

	devres->groups = groups;
	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_device_add_groups);

/**
 * devm_device_remove_groups - remove a list of managed groups
 *
 * @dev:	The device for the groups to be removed from
 * @groups:	NULL terminated list of groups to be removed
 *
 * If groups is not NULL, remove the specified groups from the device.
 */
void devm_device_remove_groups(struct device *dev,
			       const struct attribute_group **groups)
{
	WARN_ON(devres_release(dev, devm_attr_groups_remove,
			       devm_attr_group_match,
			       /* cast away const */ (void *)groups));
}
EXPORT_SYMBOL_GPL(devm_device_remove_groups);

/*
 * Create the standard sysfs attributes for a newly added device: class
 * groups, type groups, the device's own groups, plus the optional
 * "online", "waiting_for_supplier" and "removable" files.  On any
 * failure, everything created so far is unwound in reverse order.
 */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_groups(dev, class->dev_groups);
		if (error)
			return error;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_groups;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		error = device_create_file(dev, &dev_attr_online);
		if (error)
			goto err_remove_dev_groups;
	}

	if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
		error = device_create_file(dev, &dev_attr_waiting_for_supplier);
		if (error)
			goto err_remove_dev_online;
	}

	if (dev_removable_is_valid(dev)) {
		error = device_create_file(dev, &dev_attr_removable);
		if (error)
			goto err_remove_dev_waiting_for_supplier;
	}

	return 0;

 err_remove_dev_waiting_for_supplier:
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
 err_remove_dev_online:
	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_groups:
	if (class)
		device_remove_groups(dev, class->dev_groups);

	return error;
}

/* Tear down everything device_add_attrs() created (extra files are no-ops). */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_file(dev, &dev_attr_removable);
	device_remove_file(dev, &dev_attr_waiting_for_supplier);
	device_remove_file(dev, &dev_attr_online);
	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class)
		device_remove_groups(dev, class->dev_groups);
}

/* "dev" attribute: print the device's MAJOR:MINOR number. */
static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}
static DEVICE_ATTR_RO(dev);

/* /sys/devices/ */
struct kset *devices_kset;

/**
 * devices_kset_move_before - Move device in the devices_kset's list.
 * @deva: Device to move.
 * @devb: Device @deva should come before.
 */
static void devices_kset_move_before(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s before %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_after - Move device in the devices_kset's list.
 * @deva: Device to move
 * @devb: Device @deva should come after.
 */
static void devices_kset_move_after(struct device *deva, struct device *devb)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s after %s\n",
		 dev_name(deva), dev_name(devb));
	spin_lock(&devices_kset->list_lock);
	list_move(&deva->kobj.entry, &devb->kobj.entry);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * devices_kset_move_last - move the device to the end of devices_kset's list.
 * @dev: device to move
 */
void devices_kset_move_last(struct device *dev)
{
	if (!devices_kset)
		return;
	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
	spin_lock(&devices_kset->list_lock);
	list_move_tail(&dev->kobj.entry, &devices_kset->list);
	spin_unlock(&devices_kset->list_lock);
}

/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;

	if (dev) {
		/* Catch attributes whose mode and callbacks disagree. */
		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
			"Attribute %s: write permission without 'store'\n",
			attr->attr.name);
		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
			"Attribute %s: read permission without 'show'\n",
			attr->attr.name);
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	}

	return error;
}
EXPORT_SYMBOL_GPL(device_create_file);

/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(device_remove_file);

/**
 * device_remove_file_self - remove sysfs attribute file from its own method.
 * @dev: device.
 * @attr: device attribute descriptor.
 *
 * See kernfs_remove_self() for details.
 */
bool device_remove_file_self(struct device *dev,
			     const struct device_attribute *attr)
{
	if (dev)
		return sysfs_remove_file_self(&dev->kobj, &attr->attr);
	else
		return false;
}
EXPORT_SYMBOL_GPL(device_remove_file_self);

/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
int device_create_bin_file(struct device *dev,
			   const struct bin_attribute *attr)
{
	int error = -EINVAL;
	if (dev)
		error = sysfs_create_bin_file(&dev->kobj, attr);
	return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);

/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

/* klist get/put callbacks: pin a child device while it is on the list. */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}

/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * All fields in @dev must be initialized by the caller to 0, except
 * for those explicitly set to some other value. The simplest
 * approach is to use kzalloc() to allocate the structure containing
 * @dev.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
#ifdef CONFIG_PROVE_LOCKING
	mutex_init(&dev->lockdep_mutex);
#endif
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
#ifdef CONFIG_GENERIC_MSI_IRQ
	raw_spin_lock_init(&dev->msi_lock);
	INIT_LIST_HEAD(&dev->msi_list);
#endif
	INIT_LIST_HEAD(&dev->links.consumers);
	INIT_LIST_HEAD(&dev->links.suppliers);
	INIT_LIST_HEAD(&dev->links.defer_sync);
	dev->links.status = DL_DEV_NO_DRIVER;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	dev->dma_coherent = dma_default_coherent;
#endif
#ifdef CONFIG_SWIOTLB
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
#endif
}
EXPORT_SYMBOL_GPL(device_initialize);

/*
 * Lazily create and return /sys/devices/virtual, the parent directory
 * for class devices that have no physical parent.
 */
struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

/* A "glue" directory kobject: groups same-class devices under one parent. */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release	=
			  class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};

/*
 * Allocate and register a "glue" directory named after @class under
 * @parent_kobj.  Returns the new kobject or an ERR_PTR on failure.
 */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return ERR_PTR(-ENOMEM);

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return ERR_PTR(retval);
	}
	return &dir->kobj;
}

static DEFINE_MUTEX(gdp_mutex);

/*
 * Pick the sysfs parent kobject for @dev: direct parent, bus default
 * root, "virtual", or a per-class glue directory (created on demand
 * under gdp_mutex and reference-counted when reused).
 */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	/* subsystems can specify a default root directory for their devices */
	if (!parent && dev->bus && dev->bus->dev_root)
		return &dev->bus->dev_root->kobj;

	if (parent)
		return &parent->kobj;
	return NULL;
}

/* True if @kobj is one of @dev's class's glue directories. */
static inline bool live_in_glue_dir(struct kobject *kobj,
				    struct device *dev)
{
	if (!kobj || !dev->class ||
	    kobj->kset != &dev->class->p->glue_dirs)
		return false;
	return true;
}

static inline struct kobject *get_glue_dir(struct device *dev)
{
	return dev->kobj.parent;
}

/*
 * make sure cleaning up dir as the last step, we need to make
 * sure .release handler of kobject is run with holding the
 * global lock
 */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	unsigned int ref;

	/* see if we live in a "glue" directory */
	if (!live_in_glue_dir(glue_dir, dev))
		return;

	mutex_lock(&gdp_mutex);
	/*
	 * There is a race condition between removing glue directory
	 * and adding a new device under the glue directory.
	 *
	 * CPU1:                                         CPU2:
	 *
	 * device_add()
	 *   get_device_parent()
	 *     class_dir_create_and_add()
	 *       kobject_add_internal()
	 *         create_dir()    // create glue_dir
	 *
	 *                                               device_add()
	 *                                                 get_device_parent()
	 *                                                   kobject_get() // get glue_dir
	 *
	 * device_del()
	 *   cleanup_glue_dir()
	 *     kobject_del(glue_dir)
	 *
	 *                                               kobject_add()
	 *                                                 kobject_add_internal()
	 *                                                   create_dir() // in glue_dir
	 *                                                     sysfs_create_dir_ns()
	 *                                                       kernfs_create_dir_ns(sd)
	 *
	 *       sysfs_remove_dir() // glue_dir->sd=NULL
	 *       sysfs_put()        // free glue_dir->sd
	 *
	 *                                                         // sd is freed
	 *                                                       kernfs_new_node(sd)
	 *                                                         kernfs_get(glue_dir)
	 *                                                         kernfs_add_one()
	 *                                                         kernfs_put()
	 *
	 * Before CPU1 remove last child device under glue dir, if CPU2 add
	 * a new device under glue dir, the glue_dir kobject reference count
	 * will be increase to 2 in kobject_get(k). And CPU2 has been called
	 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir()
	 * and sysfs_put(). This result in glue_dir->sd is freed.
	 *
	 * Then the CPU2 will see a stale "empty" but still potentially used
	 * glue dir around in kernfs_new_node().
	 *
	 * In order to avoid this happening, we also should make sure that
	 * kernfs_node for glue_dir is released in CPU1 only when refcount
	 * for glue_dir kobj is 1.
	 */
	ref = kref_read(&glue_dir->kref);
	if (!kobject_has_children(glue_dir) && !--ref)
		kobject_del(glue_dir);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}

/*
 * Create the standard symlinks for a newly added device: "of_node"
 * (best effort), "subsystem", "device" (to the parent), plus the link
 * from the class directory back to the device.
 */
static int device_add_class_symlinks(struct device *dev)
{
	struct device_node *of_node = dev_of_node(dev);
	int error;

	if (of_node) {
		error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
		if (error)
			dev_warn(dev, "Error %d creating of_node link\n",error);
		/* An error here doesn't warrant bringing down the device */
	}

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out_devnode;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out_devnode:
	sysfs_remove_link(&dev->kobj, "of_node");
	return error;
}

/* Undo device_add_class_symlinks() in reverse order. */
static void device_remove_class_symlinks(struct device *dev)
{
	if (dev_of_node(dev))
		sysfs_remove_link(&dev->kobj, "of_node");

	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class ==
                                             &block_class)
                return;
#endif
        sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}

/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 *
 * Returns 0 on success or the negative errno returned by
 * kobject_set_name_vargs() on failure.
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
        va_list vargs;
        int err;

        va_start(vargs, fmt);
        err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
        va_end(vargs);
        return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);

/**
 * device_to_dev_kobj - select a /sys/dev/ directory for the device
 * @dev: device
 *
 * By default we select char/ for new entries.  Setting class->dev_kobj
 * to NULL prevents an entry from being created.  class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence of
 * the link.
3199 */ 3200 static struct kobject *device_to_dev_kobj(struct device *dev) 3201 { 3202 struct kobject *kobj; 3203 3204 if (dev->class) 3205 kobj = dev->class->dev_kobj; 3206 else 3207 kobj = sysfs_dev_char_kobj; 3208 3209 return kobj; 3210 } 3211 3212 static int device_create_sys_dev_entry(struct device *dev) 3213 { 3214 struct kobject *kobj = device_to_dev_kobj(dev); 3215 int error = 0; 3216 char devt_str[15]; 3217 3218 if (kobj) { 3219 format_dev_t(devt_str, dev->devt); 3220 error = sysfs_create_link(kobj, &dev->kobj, devt_str); 3221 } 3222 3223 return error; 3224 } 3225 3226 static void device_remove_sys_dev_entry(struct device *dev) 3227 { 3228 struct kobject *kobj = device_to_dev_kobj(dev); 3229 char devt_str[15]; 3230 3231 if (kobj) { 3232 format_dev_t(devt_str, dev->devt); 3233 sysfs_remove_link(kobj, devt_str); 3234 } 3235 } 3236 3237 static int device_private_init(struct device *dev) 3238 { 3239 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); 3240 if (!dev->p) 3241 return -ENOMEM; 3242 dev->p->device = dev; 3243 klist_init(&dev->p->klist_children, klist_children_get, 3244 klist_children_put); 3245 INIT_LIST_HEAD(&dev->p->deferred_probe); 3246 return 0; 3247 } 3248 3249 /** 3250 * device_add - add device to device hierarchy. 3251 * @dev: device. 3252 * 3253 * This is part 2 of device_register(), though may be called 3254 * separately _iff_ device_initialize() has been called separately. 3255 * 3256 * This adds @dev to the kobject hierarchy via kobject_add(), adds it 3257 * to the global and sibling lists for the device, then 3258 * adds it to the other relevant subsystems of the driver model. 3259 * 3260 * Do not call this routine or device_register() more than once for 3261 * any device structure. The driver model core is not designed to work 3262 * with devices that get unregistered and then spring back to life. 3263 * (Among other things, it's very hard to guarantee that all references 3264 * to the previous incarnation of @dev have been dropped.) 
 * Allocate and register a fresh new struct device instead.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 *
 * Rule of thumb is: if device_add() succeeds, you should call
 * device_del() when you want to get rid of it. If device_add() has
 * *not* succeeded, use *only* put_device() to drop the reference
 * count.
 */
int device_add(struct device *dev)
{
        struct device *parent;
        struct kobject *kobj;
        struct class_interface *class_intf;
        int error = -EINVAL;
        struct kobject *glue_dir = NULL;

        dev = get_device(dev);
        if (!dev)
                goto done;

        if (!dev->p) {
                error = device_private_init(dev);
                if (error)
                        goto done;
        }

        /*
         * for statically allocated devices, which should all be converted
         * some day, we need to initialize the name. We prevent reading back
         * the name, and force the use of dev_name()
         */
        if (dev->init_name) {
                dev_set_name(dev, "%s", dev->init_name);
                dev->init_name = NULL;
        }

        /* subsystems can specify simple device enumeration */
        if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
                dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);

        if (!dev_name(dev)) {
                error = -EINVAL;
                goto name_error;
        }

        pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

        parent = get_device(dev->parent);
        kobj = get_device_parent(dev, parent);
        if (IS_ERR(kobj)) {
                error = PTR_ERR(kobj);
                goto parent_error;
        }
        if (kobj)
                dev->kobj.parent = kobj;

        /* use parent numa_node */
        if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
                set_dev_node(dev, dev_to_node(parent));

        /* first, register with generic layer. */
        /* we require the name to be set before, and pass NULL */
        error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
        if (error) {
                glue_dir = get_glue_dir(dev);
                goto Error;
        }

        /* notify platform of device entry */
        device_platform_notify(dev);

        error = device_create_file(dev, &dev_attr_uevent);
        if (error)
                goto attrError;

        error = device_add_class_symlinks(dev);
        if (error)
                goto SymlinkError;
        error = device_add_attrs(dev);
        if (error)
                goto AttrsError;
        error = bus_add_device(dev);
        if (error)
                goto BusError;
        error = dpm_sysfs_add(dev);
        if (error)
                goto DPMError;
        device_pm_add(dev);

        if (MAJOR(dev->devt)) {
                error = device_create_file(dev, &dev_attr_dev);
                if (error)
                        goto DevAttrError;

                error = device_create_sys_dev_entry(dev);
                if (error)
                        goto SysEntryError;

                devtmpfs_create_node(dev);
        }

        /* Notify clients of device addition.  This call must come
         * after dpm_sysfs_add() and before kobject_uevent().
         */
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_ADD_DEVICE, dev);

        kobject_uevent(&dev->kobj, KOBJ_ADD);

        /*
         * Check if any of the other devices (consumers) have been waiting for
         * this device (supplier) to be added so that they can create a device
         * link to it.
         *
         * This needs to happen after device_pm_add() because device_link_add()
         * requires the supplier be registered before it's called.
         *
         * But this also needs to happen before bus_probe_device() to make sure
         * waiting consumers can link to it before the driver is bound to the
         * device and the driver sync_state callback is called for this device.
         */
        if (dev->fwnode && !dev->fwnode->dev) {
                dev->fwnode->dev = dev;
                fw_devlink_link_device(dev);
        }

        bus_probe_device(dev);

        /*
         * If all driver registration is done and a newly added device doesn't
         * match with any driver, don't block its consumers from probing in
         * case the consumer device is able to operate without this supplier.
         */
        if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
                fw_devlink_unblock_consumers(dev);

        if (parent)
                klist_add_tail(&dev->p->knode_parent,
                               &parent->p->klist_children);

        if (dev->class) {
                mutex_lock(&dev->class->p->mutex);
                /* tie the class to the device */
                klist_add_tail(&dev->p->knode_class,
                               &dev->class->p->klist_devices);

                /* notify any interfaces that the device is here */
                list_for_each_entry(class_intf,
                                    &dev->class->p->interfaces, node)
                        if (class_intf->add_dev)
                                class_intf->add_dev(dev, class_intf);
                mutex_unlock(&dev->class->p->mutex);
        }
done:
        put_device(dev);
        return error;
        /*
         * Error unwinding: each label below undoes the setup steps above it,
         * in reverse order of registration.
         */
SysEntryError:
        if (MAJOR(dev->devt))
                device_remove_file(dev, &dev_attr_dev);
DevAttrError:
        device_pm_remove(dev);
        dpm_sysfs_remove(dev);
DPMError:
        bus_remove_device(dev);
BusError:
        device_remove_attrs(dev);
AttrsError:
        device_remove_class_symlinks(dev);
SymlinkError:
        device_remove_file(dev, &dev_attr_uevent);
attrError:
        device_platform_notify_remove(dev);
        kobject_uevent(&dev->kobj, KOBJ_REMOVE);
        glue_dir = get_glue_dir(dev);
        kobject_del(&dev->kobj);
Error:
        cleanup_glue_dir(dev, glue_dir);
parent_error:
        put_device(parent);
name_error:
        kfree(dev->p);
        dev->p = NULL;
        goto done;
}
EXPORT_SYMBOL_GPL(device_add);

/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * For more information, see the kerneldoc for device_initialize()
 * and device_add().
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
        device_initialize(dev);
        return device_add(dev);
}
EXPORT_SYMBOL_GPL(device_register);

/**
 * get_device - increment reference count for device.
 * @dev: device.
 *
 * This simply forwards the call to kobject_get(), though
 * we do take care to provide for the case that we get a NULL
 * pointer passed in.
 */
struct device *get_device(struct device *dev)
{
        return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
EXPORT_SYMBOL_GPL(get_device);

/**
 * put_device - decrement reference count.
 * @dev: device in question.
 */
void put_device(struct device *dev)
{
        /* might_sleep(); */
        if (dev)
                kobject_put(&dev->kobj);
}
EXPORT_SYMBOL_GPL(put_device);

/*
 * Mark @dev as dead so no new probing can start; returns false if it was
 * already marked dead.  The caller must hold the device lock.
 */
bool kill_device(struct device *dev)
{
        /*
         * Require the device lock and set the "dead" flag to guarantee that
         * the update behavior is consistent with the other bitfields near
         * it and that we cannot have an asynchronous probe routine trying
         * to run while we are tearing out the bus/class/sysfs from
         * underneath the device.
         */
        device_lock_assert(dev);

        if (dev->p->dead)
                return false;
        dev->p->dead = true;
        return true;
}
EXPORT_SYMBOL_GPL(kill_device);

/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence. This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
        struct device *parent = dev->parent;
        struct kobject *glue_dir = NULL;
        struct class_interface *class_intf;
        unsigned int noio_flag;

        device_lock(dev);
        kill_device(dev);
        device_unlock(dev);

        if (dev->fwnode && dev->fwnode->dev == dev)
                dev->fwnode->dev = NULL;

        /* Notify clients of device removal.  This call must come
         * before dpm_sysfs_remove().
         */
        /* the noio scope covers the whole teardown, restored at the end */
        noio_flag = memalloc_noio_save();
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DEL_DEVICE, dev);

        /* teardown runs in (roughly) reverse order of device_add() */
        dpm_sysfs_remove(dev);
        if (parent)
                klist_del(&dev->p->knode_parent);
        if (MAJOR(dev->devt)) {
                devtmpfs_delete_node(dev);
                device_remove_sys_dev_entry(dev);
                device_remove_file(dev, &dev_attr_dev);
        }
        if (dev->class) {
                device_remove_class_symlinks(dev);

                mutex_lock(&dev->class->p->mutex);
                /* notify any interfaces that the device is now gone */
                list_for_each_entry(class_intf,
                                    &dev->class->p->interfaces, node)
                        if (class_intf->remove_dev)
                                class_intf->remove_dev(dev, class_intf);
                /* remove the device from the class list */
                klist_del(&dev->p->knode_class);
                mutex_unlock(&dev->class->p->mutex);
        }
        device_remove_file(dev, &dev_attr_uevent);
        device_remove_attrs(dev);
        bus_remove_device(dev);
        device_pm_remove(dev);
        driver_deferred_probe_del(dev);
        device_platform_notify_remove(dev);
        device_remove_properties(dev);
        device_links_purge(dev);

        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_REMOVED_DEVICE, dev);
        kobject_uevent(&dev->kobj, KOBJ_REMOVE);
        glue_dir = get_glue_dir(dev);
        kobject_del(&dev->kobj);
        cleanup_glue_dir(dev, glue_dir);
        memalloc_noio_restore(noio_flag);
        put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);

/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above.
Otherwise, the structure will 3610 * stick around until the final reference to the device is dropped. 3611 */ 3612 void device_unregister(struct device *dev) 3613 { 3614 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 3615 device_del(dev); 3616 put_device(dev); 3617 } 3618 EXPORT_SYMBOL_GPL(device_unregister); 3619 3620 static struct device *prev_device(struct klist_iter *i) 3621 { 3622 struct klist_node *n = klist_prev(i); 3623 struct device *dev = NULL; 3624 struct device_private *p; 3625 3626 if (n) { 3627 p = to_device_private_parent(n); 3628 dev = p->device; 3629 } 3630 return dev; 3631 } 3632 3633 static struct device *next_device(struct klist_iter *i) 3634 { 3635 struct klist_node *n = klist_next(i); 3636 struct device *dev = NULL; 3637 struct device_private *p; 3638 3639 if (n) { 3640 p = to_device_private_parent(n); 3641 dev = p->device; 3642 } 3643 return dev; 3644 } 3645 3646 /** 3647 * device_get_devnode - path of device node file 3648 * @dev: device 3649 * @mode: returned file access mode 3650 * @uid: returned file owner 3651 * @gid: returned file group 3652 * @tmp: possibly allocated string 3653 * 3654 * Return the relative path of a possible device node. 3655 * Non-default names may need to allocate a memory to compose 3656 * a name. This memory is returned in tmp and needs to be 3657 * freed by the caller. 
 */
const char *device_get_devnode(struct device *dev,
                               umode_t *mode, kuid_t *uid, kgid_t *gid,
                               const char **tmp)
{
        char *s;

        *tmp = NULL;

        /* the device type may provide a specific name */
        if (dev->type && dev->type->devnode)
                *tmp = dev->type->devnode(dev, mode, uid, gid);
        if (*tmp)
                return *tmp;

        /* the class may provide a specific name */
        if (dev->class && dev->class->devnode)
                *tmp = dev->class->devnode(dev, mode);
        if (*tmp)
                return *tmp;

        /* return name without allocation, tmp == NULL */
        if (strchr(dev_name(dev), '!') == NULL)
                return dev_name(dev);

        /* replace '!' in the name with '/' */
        s = kstrdup(dev_name(dev), GFP_KERNEL);
        if (!s)
                return NULL;
        strreplace(s, '!', '/');
        return *tmp = s;
}

/**
 * device_for_each_child - device child iterator.
 * @parent: parent struct device.
 * @fn: function to be called for each device.
 * @data: data for the callback.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
 */
int device_for_each_child(struct device *parent, void *data,
                          int (*fn)(struct device *dev, void *data))
{
        struct klist_iter i;
        struct device *child;
        int error = 0;

        /* a device that never finished device_add() has no children */
        if (!parent->p)
                return 0;

        klist_iter_init(&parent->p->klist_children, &i);
        while (!error && (child = next_device(&i)))
                error = fn(child, data);
        klist_iter_exit(&i);
        return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child);

/**
 * device_for_each_child_reverse - device child iterator in reversed order.
 * @parent: parent struct device.
 * @fn: function to be called for each device.
 * @data: data for the callback.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
 */
int device_for_each_child_reverse(struct device *parent, void *data,
                                  int (*fn)(struct device *dev, void *data))
{
        struct klist_iter i;
        struct device *child;
        int error = 0;

        if (!parent->p)
                return 0;

        klist_iter_init(&parent->p->klist_children, &i);
        /* note: unlike the forward variant, the iterator is advanced first */
        while ((child = prev_device(&i)) && !error)
                error = fn(child, data);
        klist_iter_exit(&i);
        return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);

/**
 * device_find_child - device iterator for locating a particular device.
 * @parent: parent struct device
 * @match: Callback function to check device
 * @data: Data to pass to match function
 *
 * This is similar to the device_for_each_child() function above, but it
 * returns a reference to a device that is 'found' for later use, as
 * determined by the @match callback.
 *
 * The callback should return 0 if the device doesn't match and non-zero
 * if it does. If the callback returns non-zero and a reference to the
 * current device can be obtained, this function will return to the caller
 * and not iterate over any more devices.
 *
 * NOTE: you will need to drop the reference with put_device() after use.
 */
struct device *device_find_child(struct device *parent, void *data,
                                 int (*match)(struct device *dev, void *data))
{
        struct klist_iter i;
        struct device *child;

        if (!parent)
                return NULL;

        klist_iter_init(&parent->p->klist_children, &i);
        while ((child = next_device(&i)))
                /* only stop once a reference on the match could be taken */
                if (match(child, data) && get_device(child))
                        break;
        klist_iter_exit(&i);
        return child;
}
EXPORT_SYMBOL_GPL(device_find_child);

/**
 * device_find_child_by_name - device iterator for locating a child device.
 * @parent: parent struct device
 * @name: name of the child device
 *
 * This is similar to the device_find_child() function above, but it
 * returns a reference to a device that has the name @name.
 *
 * NOTE: you will need to drop the reference with put_device() after use.
 */
struct device *device_find_child_by_name(struct device *parent,
                                         const char *name)
{
        struct klist_iter i;
        struct device *child;

        if (!parent)
                return NULL;

        klist_iter_init(&parent->p->klist_children, &i);
        while ((child = next_device(&i)))
                if (sysfs_streq(dev_name(child), name) && get_device(child))
                        break;
        klist_iter_exit(&i);
        return child;
}
EXPORT_SYMBOL_GPL(device_find_child_by_name);

/*
 * Create the top-level /sys/devices and /sys/dev hierarchies, unwinding
 * partially created objects on failure.
 */
int __init devices_init(void)
{
        devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
        if (!devices_kset)
                return -ENOMEM;
        dev_kobj = kobject_create_and_add("dev", NULL);
        if (!dev_kobj)
                goto dev_kobj_err;
        sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
        if (!sysfs_dev_block_kobj)
                goto block_kobj_err;
        sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
        if (!sysfs_dev_char_kobj)
                goto char_kobj_err;

        return 0;

 char_kobj_err:
        kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
        kobject_put(dev_kobj);
 dev_kobj_err:
        kset_unregister(devices_kset);
        return -ENOMEM;
}

/*
 * Recursively returns -EBUSY if @dev or any of its descendants supports
 * offlining but is not currently offline; 0 otherwise.
 */
static int device_check_offline(struct device *dev, void *not_used)
{
        int ret;

        ret = device_for_each_child(dev, NULL, device_check_offline);
        if (ret)
                return ret;

        return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
}

/**
 * device_offline - Prepare the device for hot-removal.
 * @dev: Device to be put offline.
 *
 * Execute the device bus type's .offline() callback, if present, to prepare
 * the device for a subsequent hot-removal. If that succeeds, the device must
 * not be used until either it is removed or its bus type's .online() callback
 * is executed.
 *
 * Call under device_hotplug_lock.
 *
 * Returns 1 if the device was already offline, 0 on success, or a negative
 * errno on failure.
 */
int device_offline(struct device *dev)
{
        int ret;

        if (dev->offline_disabled)
                return -EPERM;

        /* refuse unless the whole subtree can go offline */
        ret = device_for_each_child(dev, NULL, device_check_offline);
        if (ret)
                return ret;

        device_lock(dev);
        if (device_supports_offline(dev)) {
                if (dev->offline) {
                        ret = 1;
                } else {
                        ret = dev->bus->offline(dev);
                        if (!ret) {
                                kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                                dev->offline = true;
                        }
                }
        }
        device_unlock(dev);

        return ret;
}

/**
 * device_online - Put the device back online after successful device_offline().
 * @dev: Device to be put back online.
 *
 * If device_offline() has been successfully executed for @dev, but the device
 * has not been removed subsequently, execute its bus type's .online() callback
 * to indicate that the device can be used again.
 *
 * Call under device_hotplug_lock.
 */
int device_online(struct device *dev)
{
        int ret = 0;

        device_lock(dev);
        if (device_supports_offline(dev)) {
                if (dev->offline) {
                        ret = dev->bus->online(dev);
                        if (!ret) {
                                kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                                dev->offline = false;
                        }
                } else {
                        /* already online */
                        ret = 1;
                }
        }
        device_unlock(dev);

        return ret;
}

/* A root device anchors a group of devices directly under /sys/devices. */
struct root_device {
        struct device dev;
        struct module *owner;       /* module owning the "module" symlink */
};

static inline struct root_device *to_root_device(struct device *d)
{
        return container_of(d, struct root_device, dev);
}

/* Release callback: frees the containing root_device allocation. */
static void root_device_release(struct device *dev)
{
        kfree(to_root_device(dev));
}

/**
 * __root_device_register - allocate and register a root device
 * @name: root device name
 * @owner: owner module of the root device, usually THIS_MODULE
 *
 * This function allocates a root device and registers it
 * using device_register(). In order to free the returned
 * device, use root_device_unregister().
 *
 * Root devices are dummy devices which allow other devices
 * to be grouped under /sys/devices. Use this function to
 * allocate a root device and then use it as the parent of
 * any device which should appear under /sys/devices/{name}
 *
 * The /sys/devices/{name} directory will also contain a
 * 'module' symlink which points to the @owner directory
 * in sysfs.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: You probably want to use root_device_register().
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
        struct root_device *root;
        int err = -ENOMEM;

        root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
        if (!root)
                return ERR_PTR(err);

        err = dev_set_name(&root->dev, "%s", name);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
        }

        root->dev.release = root_device_release;

        err = device_register(&root->dev);
        if (err) {
                /*
                 * After device_register() the kobject owns the allocation:
                 * put_device() frees it via root_device_release().
                 */
                put_device(&root->dev);
                return ERR_PTR(err);
        }

#ifdef CONFIG_MODULES   /* gotta find a "cleaner" way to do this */
        if (owner) {
                struct module_kobject *mk = &owner->mkobj;

                err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
                if (err) {
                        device_unregister(&root->dev);
                        return ERR_PTR(err);
                }
                root->owner = owner;
        }
#endif

        return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);

/**
 * root_device_unregister - unregister and free a root device
 * @dev: device going away
 *
 * This function unregisters and cleans up a device that was created by
 * root_device_register().
 */
void root_device_unregister(struct device *dev)
{
        struct root_device *root = to_root_device(dev);

        if (root->owner)
                sysfs_remove_link(&root->dev.kobj, "module");

        device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);


/* Release callback for devices allocated by device_create_groups_vargs(). */
static void device_create_release(struct device *dev)
{
        pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
        kfree(dev);
}

/*
 * Common worker for device_create() and device_create_with_groups():
 * allocate a device, initialize it and device_add() it.  On any failure
 * the (possibly NULL) device is dropped via put_device() and an
 * ERR_PTR() is returned.
 */
static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
                           dev_t devt, void *drvdata,
                           const struct attribute_group **groups,
                           const char *fmt, va_list args)
{
        struct device *dev = NULL;
        int retval = -ENODEV;

        if (class == NULL || IS_ERR(class))
                goto error;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                retval = -ENOMEM;
                goto error;
        }

        device_initialize(dev);
        dev->devt = devt;
        dev->class = class;
        dev->parent = parent;
        dev->groups = groups;
        dev->release = device_create_release;
        dev_set_drvdata(dev, drvdata);

        retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
        if (retval)
                goto error;

        retval = device_add(dev);
        if (retval)
                goto error;

        return dev;

error:
        put_device(dev);
        return ERR_PTR(retval);
}

/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes. A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create(struct class *class, struct device *parent,
                             dev_t devt, void *drvdata, const char *fmt, ...)
{
        va_list vargs;
        struct device *dev;

        va_start(vargs, fmt);
        dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
                                         fmt, vargs);
        va_end(vargs);
        return dev;
}
EXPORT_SYMBOL_GPL(device_create);

/**
 * device_create_with_groups - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @groups: NULL-terminated list of attribute groups to be created
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes. A struct device
 * will be created in sysfs, registered to the specified class.
 * Additional attributes specified in the groups parameter will also
 * be created automatically.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create_with_groups(struct class *class,
                                         struct device *parent, dev_t devt,
                                         void *drvdata,
                                         const struct attribute_group **groups,
                                         const char *fmt, ...)
{
        va_list vargs;
        struct device *dev;

        va_start(vargs, fmt);
        dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
                                         fmt, vargs);
        va_end(vargs);
        return dev;
}
EXPORT_SYMBOL_GPL(device_create_with_groups);

/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
void device_destroy(struct class *class, dev_t devt)
{
        struct device *dev;

        dev = class_find_device_by_devt(class, devt);
        if (dev) {
                /*
                 * Drop the lookup reference first; device_unregister()
                 * then removes the device and drops the initial reference.
                 */
                put_device(dev);
                device_unregister(dev);
        }
}
EXPORT_SYMBOL_GPL(device_destroy);

/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * It is the responsibility of the caller to provide mutual
 * exclusion between two different calls of device_rename
 * on the same device to ensure that new_name is valid and
 * won't conflict with other devices.
 *
 * Note: Don't call this function.  Currently, the networking layer calls this
 * function, but that will change.  The following text from Kay Sievers offers
 * some insight:
 *
 * Renaming devices is racy at many levels, symlinks and other stuff are not
 * replaced atomically, and you get a "move" uevent, but it's not easy to
 * connect the event to the old and new device. Device nodes are not renamed at
 * all, there isn't even support for that in the kernel now.
 *
 * In the meantime, during renaming, your target name might be taken by another
 * driver, creating conflicts. Or the old name is taken directly after you
 * renamed it -- then you get events for the same DEVPATH, before you even see
 * the "move" event. It's just a mess, and nothing new should ever rely on
 * kernel device renaming. Besides that, it's not even implemented now for
 * other things than (driver-core wise very simple) network devices.
 *
 * We are currently about to change network renaming in udev to completely
 * disallow renaming of devices in the same namespace as the kernel uses,
 * because we can't solve the problems properly, that arise with swapping names
 * of multiple interfaces without races.
 * Means, renaming of eth[0-9]* will only be allowed to some other name than
 * eth[0-9]*, for the aforementioned reasons.
 *
 * Make up a "real" name in the driver before you register anything, or add
 * some other attributes for userspace to find the device, or use udev to add
 * symlinks -- but never rename kernel devices later, it's a complete mess. We
 * don't even want to get into that and try to implement the missing pieces in
 * the core. We really have other pieces to fix in the driver core mess. :)
 */
int device_rename(struct device *dev, const char *new_name)
{
        struct kobject *kobj = &dev->kobj;
        char *old_device_name = NULL;
        int error;

        dev = get_device(dev);
        if (!dev)
                return -EINVAL;

        dev_dbg(dev, "renaming to %s\n", new_name);

        /* keep a copy of the old name for renaming the class symlink */
        old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
        if (!old_device_name) {
                error = -ENOMEM;
                goto out;
        }

        if (dev->class) {
                error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
                                             kobj, old_device_name,
                                             new_name, kobject_namespace(kobj));
                if (error)
                        goto out;
        }

        error = kobject_rename(kobj, new_name);
        if (error)
                goto out;

out:
        put_device(dev);

        kfree(old_device_name);

        return error;
}
EXPORT_SYMBOL_GPL(device_rename);

/* Move the "device" symlink of @dev from @old_parent to @new_parent. */
static int device_move_class_links(struct device *dev,
                                   struct device *old_parent,
                                   struct device *new_parent)
{
        int error = 0;

        if (old_parent)
                sysfs_remove_link(&dev->kobj, "device");
        if (new_parent)
                error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
                                          "device");
        return error;
}

/**
 * device_move - moves a device to a new parent
 * @dev: the pointer to the struct device to be moved
 * @new_parent: the new parent of the device (can be NULL)
 * @dpm_order: how to reorder the dpm_list
 */
int device_move(struct device *dev, struct device *new_parent,
		enum dpm_order dpm_order)
{
	int error;
	struct device *old_parent;
	struct kobject *new_parent_kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	device_pm_lock();
	new_parent = get_device(new_parent);
	new_parent_kobj = get_device_parent(dev, new_parent);
	if (IS_ERR(new_parent_kobj)) {
		error = PTR_ERR(new_parent_kobj);
		put_device(new_parent);
		goto out;
	}

	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
	error = kobject_move(&dev->kobj, new_parent_kobj);
	if (error) {
		cleanup_glue_dir(dev, new_parent_kobj);
		put_device(new_parent);
		goto out;
	}
	/* Reparent: swap the klist membership and update the NUMA node. */
	old_parent = dev->parent;
	dev->parent = new_parent;
	if (old_parent)
		klist_remove(&dev->p->knode_parent);
	if (new_parent) {
		klist_add_tail(&dev->p->knode_parent,
			       &new_parent->p->klist_children);
		set_dev_node(dev, dev_to_node(new_parent));
	}

	if (dev->class) {
		error = device_move_class_links(dev, old_parent, new_parent);
		if (error) {
			/* We ignore errors on cleanup since we're hosed anyway... */
			device_move_class_links(dev, new_parent, old_parent);
			/* Try to undo the kobject move and the reparenting. */
			if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
				if (new_parent)
					klist_remove(&dev->p->knode_parent);
				dev->parent = old_parent;
				if (old_parent) {
					klist_add_tail(&dev->p->knode_parent,
						       &old_parent->p->klist_children);
					set_dev_node(dev, dev_to_node(old_parent));
				}
			}
			cleanup_glue_dir(dev, new_parent_kobj);
			put_device(new_parent);
			goto out;
		}
	}
	/* Reorder dpm_list / devices_kset as requested by the caller. */
	switch (dpm_order) {
	case DPM_ORDER_NONE:
		break;
	case DPM_ORDER_DEV_AFTER_PARENT:
		device_pm_move_after(dev, new_parent);
		devices_kset_move_after(dev, new_parent);
		break;
	case DPM_ORDER_PARENT_BEFORE_DEV:
		device_pm_move_before(new_parent, dev);
		devices_kset_move_before(new_parent, dev);
		break;
	case DPM_ORDER_DEV_LAST:
		device_pm_move_last(dev);
		devices_kset_move_last(dev);
		break;
	}

	/* Drop the reference dev->parent used to hold on the old parent. */
	put_device(old_parent);
out:
	device_pm_unlock();
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_move);

/*
 * Change the ownership of @dev's sysfs attribute groups — the device
 * class groups, the device type groups, the device's own groups and, when
 * offlining is supported, the "online" attribute — to @kuid/@kgid.
 * Returns 0 on success or the first sysfs error encountered.
 */
static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
				     kgid_t kgid)
{
	struct kobject *kobj = &dev->kobj;
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		/*
		 * Change the device groups of the device class for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	if (type) {
		/*
		 * Change the device groups of the device type for @dev to
		 * @kuid/@kgid.
		 */
		error = sysfs_groups_change_owner(kobj, type->groups, kuid,
						  kgid);
		if (error)
			return error;
	}

	/* Change the device groups of @dev to @kuid/@kgid. */
	error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
	if (error)
		return error;

	if (device_supports_offline(dev) && !dev->offline_disabled) {
		/* Change online device attributes of @dev to @kuid/@kgid. */
		error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
						kuid, kgid);
		if (error)
			return error;
	}

	return 0;
}

/**
 * device_change_owner - change the owner of an existing device.
 * @dev: device.
 * @kuid: new owner's kuid
 * @kgid: new owner's kgid
 *
 * This changes the owner of @dev and its corresponding sysfs entries to
 * @kuid/@kgid. This function closely mirrors how @dev was added via driver
 * core.
 *
 * Returns 0 on success or error code on failure.
 */
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
	int error;
	struct kobject *kobj = &dev->kobj;

	dev = get_device(dev);
	if (!dev)
		return -EINVAL;

	/*
	 * Change the kobject and the default attributes and groups of the
	 * ktype associated with it to @kuid/@kgid.
	 */
	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		goto out;

	/*
	 * Change the uevent file for @dev to the new owner. The uevent file
	 * was created in a separate step when @dev got added and we mirror
	 * that step here.
	 */
	error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
					kgid);
	if (error)
		goto out;

	/*
	 * Change the device groups, the device groups associated with the
	 * device class, and the groups associated with the device type of @dev
	 * to @kuid/@kgid.
	 */
	error = device_attrs_change_owner(dev, kuid, kgid);
	if (error)
		goto out;

	/* Chown the power-management sysfs entries as well. */
	error = dpm_sysfs_change_owner(dev, kuid, kgid);
	if (error)
		goto out;

#ifdef CONFIG_BLOCK
	/*
	 * Under the deprecated sysfs layout block devices have no class-dir
	 * symlink to chown, so skip the step below.
	 */
	if (sysfs_deprecated && dev->class == &block_class)
		goto out;
#endif

	/*
	 * Change the owner of the symlink located in the class directory of
	 * the device class associated with @dev which points to the actual
	 * directory entry for @dev to @kuid/@kgid. This ensures that the
	 * symlink shows the same permissions as its target.
	 *
	 * NOTE(review): dev->class is dereferenced without a NULL check here,
	 * unlike earlier steps — presumably all callers pass class devices;
	 * confirm.
	 */
	error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
					dev_name(dev), kuid, kgid);
	if (error)
		goto out;

out:
	put_device(dev);
	return error;
}
EXPORT_SYMBOL_GPL(device_change_owner);

/**
 * device_shutdown - call ->shutdown() on each device to shutdown.
 */
void device_shutdown(void)
{
	struct device *dev, *parent;

	/* Let in-flight probes finish, then block any new ones. */
	wait_for_device_probe();
	device_block_probing();

	cpufreq_suspend();

	spin_lock(&devices_kset->list_lock);
	/*
	 * Walk the devices list backward, shutting down each in turn.
	 * Beware that device unplug events may also start pulling
	 * devices offline, even as the system is shutting down.
	 */
	while (!list_empty(&devices_kset->list)) {
		dev = list_entry(devices_kset->list.prev, struct device,
				kobj.entry);

		/*
		 * hold reference count of device's parent to
		 * prevent it from being freed because parent's
		 * lock is to be held
		 */
		parent = get_device(dev->parent);
		get_device(dev);
		/*
		 * Make sure the device is off the kset list, in the
		 * event that dev->*->shutdown() doesn't remove it.
		 */
		list_del_init(&dev->kobj.entry);
		/* The spinlock is dropped while the callbacks may sleep. */
		spin_unlock(&devices_kset->list_lock);

		/* hold lock to avoid race with probe/release */
		if (parent)
			device_lock(parent);
		device_lock(dev);

		/* Don't allow any more runtime suspends */
		pm_runtime_get_noresume(dev);
		pm_runtime_barrier(dev);

		if (dev->class && dev->class->shutdown_pre) {
			if (initcall_debug)
				dev_info(dev, "shutdown_pre\n");
			dev->class->shutdown_pre(dev);
		}
		/* Bus shutdown takes precedence over the driver's hook. */
		if (dev->bus && dev->bus->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->bus->shutdown(dev);
		} else if (dev->driver && dev->driver->shutdown) {
			if (initcall_debug)
				dev_info(dev, "shutdown\n");
			dev->driver->shutdown(dev);
		}

		device_unlock(dev);
		if (parent)
			device_unlock(parent);

		put_device(dev);
		put_device(parent);

		/* Re-take the lock before testing the list head again. */
		spin_lock(&devices_kset->list_lock);
	}
	spin_unlock(&devices_kset->list_lock);
}

/*
 * Device logging functions
 */

#ifdef CONFIG_PRINTK

/*
 * Fill in *dev_info with the subsystem name (class or bus) and a device
 * identifier string for structured logging. Left zeroed when @dev has
 * neither a class nor a bus.
 */
static void
set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
	const char *subsys;

	memset(dev_info, 0, sizeof(*dev_info));

	if (dev->class)
		subsys = dev->class->name;
	else if (dev->bus)
		subsys = dev->bus->name;
	else
		return;

	strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));

	/*
	 * Add device identifier DEVICE=:
	 *   b12:8        block dev_t
	 *   c127:3       char dev_t
	 *   n8           netdev ifindex
	 *   +sound:card0 subsystem:devname
	 */
	if (MAJOR(dev->devt)) {
		char c;

		if (strcmp(subsys, "block") == 0)
			c = 'b';
		else
			c = 'c';

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
	} else if (strcmp(subsys, "net") == 0) {
		struct net_device *net = to_net_dev(dev);

		snprintf(dev_info->device, sizeof(dev_info->device),
			 "n%u", net->ifindex);
	} else {
		snprintf(dev_info->device, sizeof(dev_info->device),
			 "+%s:%s", subsys, dev_name(dev));
	}
}

/* Emit a log record with @dev's metadata attached (va_list variant). */
int dev_vprintk_emit(int level, const struct device *dev,
		     const char *fmt, va_list args)
{
	struct dev_printk_info dev_info;

	set_dev_info(dev, &dev_info);

	return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);

/* Variadic wrapper around dev_vprintk_emit(). */
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);

	r = dev_vprintk_emit(level, dev, fmt, args);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(dev_printk_emit);

/*
 * Common backend for the dev_*() logging helpers: prefixes the message
 * with "<driver> <device>: " when @dev is non-NULL.
 */
static void __dev_printk(const char *level, const struct device *dev,
			 struct va_format *vaf)
{
	if (dev)
		/*
		 * @level is a KERN_<LEVEL> string (SOH + loglevel digit);
		 * level[1] - '0' extracts the numeric loglevel.
		 */
		dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
				dev_driver_string(dev), dev_name(dev), vaf);
	else
		printk("%s(NULL device *): %pV", level, vaf);
}

void _dev_printk(const char *level, const struct device *dev,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	__dev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(_dev_printk);

/*
 * Generates the per-loglevel helpers _dev_emerg() .. _dev_info(), each a
 * thin variadic wrapper that forwards to __dev_printk() with a fixed
 * KERN_<LEVEL> string.
 */
#define define_dev_printk_level(func, kern_level)		\
void func(const struct device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__dev_printk(kern_level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_dev_printk_level(_dev_emerg, KERN_EMERG);
define_dev_printk_level(_dev_alert, KERN_ALERT);
define_dev_printk_level(_dev_crit, KERN_CRIT);
define_dev_printk_level(_dev_err, KERN_ERR);
define_dev_printk_level(_dev_warn, KERN_WARNING);
define_dev_printk_level(_dev_notice, KERN_NOTICE);
define_dev_printk_level(_dev_info, KERN_INFO);

#endif

/**
 * dev_err_probe - probe error check and log helper
 * @dev: the pointer to the struct device
 * @err: error value to test
 * @fmt: printf-style format string
 * @...: arguments as specified in the format string
 *
 * This helper implements common pattern present in probe functions for error
 * checking: print debug or error message depending if the error value is
 * -EPROBE_DEFER and propagate error upwards.
 * In case of -EPROBE_DEFER it sets also defer probe reason, which can be
 * checked later by reading devices_deferred debugfs attribute.
 * It replaces code sequence::
 *
 * 	if (err != -EPROBE_DEFER)
 * 		dev_err(dev, ...);
 * 	else
 * 		dev_dbg(dev, ...);
 * 	return err;
 *
 * with::
 *
 * 	return dev_err_probe(dev, err, ...);
 *
 * Returns @err.
 *
 */
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (err != -EPROBE_DEFER) {
		dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	} else {
		/* Deferral: record the reason and keep the log quiet. */
		device_set_deferred_probe_reason(dev, &vaf);
		dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
	}

	va_end(args);

	return err;
}
EXPORT_SYMBOL_GPL(dev_err_probe);

/*
 * A primary fwnode's ->secondary is either NULL or a valid secondary
 * fwnode; a secondary (or standalone) fwnode carries ERR_PTR(-ENODEV)
 * there — see set_secondary_fwnode() below.
 */
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
	return fwnode && !IS_ERR(fwnode->secondary);
}

/**
 * set_primary_fwnode - Change the primary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New primary firmware node of the device.
 *
 * Set the device's firmware node pointer to @fwnode, but if a secondary
 * firmware node of the device is present, preserve it.
 *
 * Valid fwnode cases are:
 *  - primary --> secondary --> -ENODEV
 *  - primary --> NULL
 *  - secondary --> -ENODEV
 *  - NULL
 */
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	struct device *parent = dev->parent;
	struct fwnode_handle *fn = dev->fwnode;

	if (fwnode) {
		/* Carry over an existing secondary to the new primary. */
		if (fwnode_is_primary(fn))
			fn = fn->secondary;

		if (fn) {
			WARN_ON(fwnode->secondary);
			fwnode->secondary = fn;
		}
		dev->fwnode = fwnode;
	} else {
		if (fwnode_is_primary(fn)) {
			dev->fwnode = fn->secondary;
			/* Set fn->secondary = NULL, so fn remains the primary fwnode */
			if (!(parent && fn == parent->fwnode))
				fn->secondary = NULL;
		} else {
			dev->fwnode = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(set_primary_fwnode);

/**
 * set_secondary_fwnode - Change the secondary firmware node of a given device.
 * @dev: Device to handle.
 * @fwnode: New secondary firmware node of the device.
 *
 * If a primary firmware node of the device is present, set its secondary
 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
 * @fwnode.
 */
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
	/* Mark @fwnode itself as having no secondary of its own. */
	if (fwnode)
		fwnode->secondary = ERR_PTR(-ENODEV);

	if (fwnode_is_primary(dev->fwnode))
		dev->fwnode->secondary = fwnode;
	else
		dev->fwnode = fwnode;
}
EXPORT_SYMBOL_GPL(set_secondary_fwnode);

/**
 * device_set_of_node_from_dev - reuse device-tree node of another device
 * @dev: device whose device-tree node is being set
 * @dev2: device whose device-tree node is being reused
 *
 * Takes another reference to the new device-tree node after first dropping
 * any reference held to the old node.
 */
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
{
	of_node_put(dev->of_node);
	dev->of_node = of_node_get(dev2->of_node);
	dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);

/*
 * Point @dev at @fwnode (and its OF node, if it is one). Unlike
 * device_set_of_node_from_dev(), no of_node reference is taken or
 * dropped here — callers manage node lifetimes.
 */
void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
{
	dev->fwnode = fwnode;
	dev->of_node = to_of_node(fwnode);
}
EXPORT_SYMBOL_GPL(device_set_node);

/* Match helper: sysfs-style name comparison against @name. */
int device_match_name(struct device *dev, const void *name)
{
	return sysfs_streq(dev_name(dev), name);
}
EXPORT_SYMBOL_GPL(device_match_name);

/* Match helper: device's OF node equals @np. */
int device_match_of_node(struct device *dev, const void *np)
{
	return dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);

/* Match helper: device's fwnode equals @fwnode. */
int device_match_fwnode(struct device *dev, const void *fwnode)
{
	return dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);

/* Match helper: device's dev_t equals *@pdevt. */
int device_match_devt(struct device *dev, const void *pdevt)
{
	return dev->devt == *(dev_t *)pdevt;
}
EXPORT_SYMBOL_GPL(device_match_devt);

/* Match helper: device's ACPI companion equals @adev. */
int device_match_acpi_dev(struct device *dev, const void *adev)
{
	return ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);

/* Match helper that accepts every device; useful for first-device lookups. */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
EXPORT_SYMBOL_GPL(device_match_any);