1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * drivers/base/core.c - core driver model code (device registration, etc) 4 * 5 * Copyright (c) 2002-3 Patrick Mochel 6 * Copyright (c) 2002-3 Open Source Development Labs 7 * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de> 8 * Copyright (c) 2006 Novell, Inc. 9 */ 10 11 #include <linux/acpi.h> 12 #include <linux/cpufreq.h> 13 #include <linux/device.h> 14 #include <linux/err.h> 15 #include <linux/fwnode.h> 16 #include <linux/init.h> 17 #include <linux/module.h> 18 #include <linux/slab.h> 19 #include <linux/string.h> 20 #include <linux/kdev_t.h> 21 #include <linux/notifier.h> 22 #include <linux/of.h> 23 #include <linux/of_device.h> 24 #include <linux/genhd.h> 25 #include <linux/mutex.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/netdevice.h> 28 #include <linux/sched/signal.h> 29 #include <linux/sysfs.h> 30 31 #include "base.h" 32 #include "power/power.h" 33 34 #ifdef CONFIG_SYSFS_DEPRECATED 35 #ifdef CONFIG_SYSFS_DEPRECATED_V2 36 long sysfs_deprecated = 1; 37 #else 38 long sysfs_deprecated = 0; 39 #endif 40 static int __init sysfs_deprecated_setup(char *arg) 41 { 42 return kstrtol(arg, 10, &sysfs_deprecated); 43 } 44 early_param("sysfs.deprecated", sysfs_deprecated_setup); 45 #endif 46 47 /* Device links support. */ 48 static LIST_HEAD(wait_for_suppliers); 49 static DEFINE_MUTEX(wfs_lock); 50 static LIST_HEAD(deferred_sync); 51 static unsigned int defer_sync_state_count = 1; 52 static unsigned int defer_fw_devlink_count; 53 static DEFINE_MUTEX(defer_fw_devlink_lock); 54 static bool fw_devlink_is_permissive(void); 55 56 #ifdef CONFIG_SRCU 57 static DEFINE_MUTEX(device_links_lock); 58 DEFINE_STATIC_SRCU(device_links_srcu); 59 60 static inline void device_links_write_lock(void) 61 { 62 mutex_lock(&device_links_lock); 63 } 64 65 static inline void device_links_write_unlock(void) 66 { 67 mutex_unlock(&device_links_lock); 68 } 69 70 int device_links_read_lock(void) __acquires(&device_links_srcu) 71 { 72 return srcu_read_lock(&device_links_srcu); 73 } 74 75 void device_links_read_unlock(int idx) __releases(&device_links_srcu) 76 { 77 srcu_read_unlock(&device_links_srcu, idx); 78 } 79 80 int device_links_read_lock_held(void) 81 { 82 return srcu_read_lock_held(&device_links_srcu); 83 } 84 #else /* !CONFIG_SRCU */ 85 static DECLARE_RWSEM(device_links_lock); 86 87 static inline void device_links_write_lock(void) 88 { 89 down_write(&device_links_lock); 90 } 91 92 static inline void device_links_write_unlock(void) 93 { 94 up_write(&device_links_lock); 95 } 96 97 int device_links_read_lock(void) 98 { 99 down_read(&device_links_lock); 100 return 0; 101 } 102 103 void device_links_read_unlock(int not_used) 104 { 105 up_read(&device_links_lock); 106 } 107 108 #ifdef CONFIG_DEBUG_LOCK_ALLOC 109 int device_links_read_lock_held(void) 110 { 111 return lockdep_is_held(&device_links_lock); 112 } 113 #endif 114 #endif /* !CONFIG_SRCU */ 115 116 /** 117 * device_is_dependent - Check if one device depends on another one 118 * @dev: Device to check dependencies for. 119 * @target: Device to check against. 120 * 121 * Check if @target depends on @dev or any device dependent on it (its child or 122 * its consumer etc). Return 1 if that is the case or 0 otherwise. 123 */ 124 int device_is_dependent(struct device *dev, void *target) 125 { 126 struct device_link *link; 127 int ret; 128 129 if (dev == target) 130 return 1; 131 132 ret = device_for_each_child(dev, target, device_is_dependent); 133 if (ret) 134 return ret; 135 136 list_for_each_entry(link, &dev->links.consumers, s_node) { 137 if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) 138 continue; 139 140 if (link->consumer == target) 141 return 1; 142 143 ret = device_is_dependent(link->consumer, target); 144 if (ret) 145 break; 146 } 147 return ret; 148 } 149 150 static void device_link_init_status(struct device_link *link, 151 struct device *consumer, 152 struct device *supplier) 153 { 154 switch (supplier->links.status) { 155 case DL_DEV_PROBING: 156 switch (consumer->links.status) { 157 case DL_DEV_PROBING: 158 /* 159 * A consumer driver can create a link to a supplier 160 * that has not completed its probing yet as long as it 161 * knows that the supplier is already functional (for 162 * example, it has just acquired some resources from the 163 * supplier). 164 */ 165 link->status = DL_STATE_CONSUMER_PROBE; 166 break; 167 default: 168 link->status = DL_STATE_DORMANT; 169 break; 170 } 171 break; 172 case DL_DEV_DRIVER_BOUND: 173 switch (consumer->links.status) { 174 case DL_DEV_PROBING: 175 link->status = DL_STATE_CONSUMER_PROBE; 176 break; 177 case DL_DEV_DRIVER_BOUND: 178 link->status = DL_STATE_ACTIVE; 179 break; 180 default: 181 link->status = DL_STATE_AVAILABLE; 182 break; 183 } 184 break; 185 case DL_DEV_UNBINDING: 186 link->status = DL_STATE_SUPPLIER_UNBIND; 187 break; 188 default: 189 link->status = DL_STATE_DORMANT; 190 break; 191 } 192 } 193 194 static int device_reorder_to_tail(struct device *dev, void *not_used) 195 { 196 struct device_link *link; 197 198 /* 199 * Devices that have not been registered yet will be put to the ends 200 * of the lists during the registration, so skip them here. 201 */ 202 if (device_is_registered(dev)) 203 devices_kset_move_last(dev); 204 205 if (device_pm_initialized(dev)) 206 device_pm_move_last(dev); 207 208 device_for_each_child(dev, NULL, device_reorder_to_tail); 209 list_for_each_entry(link, &dev->links.consumers, s_node) { 210 if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) 211 continue; 212 device_reorder_to_tail(link->consumer, NULL); 213 } 214 215 return 0; 216 } 217 218 /** 219 * device_pm_move_to_tail - Move set of devices to the end of device lists 220 * @dev: Device to move 221 * 222 * This is a device_reorder_to_tail() wrapper taking the requisite locks. 223 * 224 * It moves the @dev along with all of its children and all of its consumers 225 * to the ends of the device_kset and dpm_list, recursively. 226 */ 227 void device_pm_move_to_tail(struct device *dev) 228 { 229 int idx; 230 231 idx = device_links_read_lock(); 232 device_pm_lock(); 233 device_reorder_to_tail(dev, NULL); 234 device_pm_unlock(); 235 device_links_read_unlock(idx); 236 } 237 238 #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ 239 DL_FLAG_AUTOREMOVE_SUPPLIER | \ 240 DL_FLAG_AUTOPROBE_CONSUMER | \ 241 DL_FLAG_SYNC_STATE_ONLY) 242 243 #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ 244 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE) 245 246 /** 247 * device_link_add - Create a link between two devices. 248 * @consumer: Consumer end of the link. 249 * @supplier: Supplier end of the link. 250 * @flags: Link flags. 251 * 252 * The caller is responsible for the proper synchronization of the link creation 253 * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the 254 * runtime PM framework to take the link into account. Second, if the 255 * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will 256 * be forced into the active metastate and reference-counted upon the creation 257 * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be 258 * ignored. 259 * 260 * If DL_FLAG_STATELESS is set in @flags, the caller of this function is 261 * expected to release the link returned by it directly with the help of either 262 * device_link_del() or device_link_remove(). 263 * 264 * If that flag is not set, however, the caller of this function is handing the 265 * management of the link over to the driver core entirely and its return value 266 * can only be used to check whether or not the link is present. In that case, 267 * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link 268 * flags can be used to indicate to the driver core when the link can be safely 269 * deleted. Namely, setting one of them in @flags indicates to the driver core 270 * that the link is not going to be used (by the given caller of this function) 271 * after unbinding the consumer or supplier driver, respectively, from its 272 * device, so the link can be deleted at that point. If none of them is set, 273 * the link will be maintained until one of the devices pointed to by it (either 274 * the consumer or the supplier) is unregistered. 275 * 276 * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and 277 * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent 278 * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can 279 * be used to request the driver core to automaticall probe for a consmer 280 * driver after successfully binding a driver to the supplier device. 281 * 282 * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER, 283 * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at 284 * the same time is invalid and will cause NULL to be returned upfront. 285 * However, if a device link between the given @consumer and @supplier pair 286 * exists already when this function is called for them, the existing link will 287 * be returned regardless of its current type and status (the link's flags may 288 * be modified then). The caller of this function is then expected to treat 289 * the link as though it has just been created, so (in particular) if 290 * DL_FLAG_STATELESS was passed in @flags, the link needs to be released 291 * explicitly when not needed any more (as stated above). 292 * 293 * A side effect of the link creation is re-ordering of dpm_list and the 294 * devices_kset list by moving the consumer device and all devices depending 295 * on it to the ends of these lists (that does not happen to devices that have 296 * not been registered when this function is called). 297 * 298 * The supplier device is required to be registered when this function is called 299 * and NULL will be returned if that is not the case. The consumer device need 300 * not be registered, however. 301 */ 302 struct device_link *device_link_add(struct device *consumer, 303 struct device *supplier, u32 flags) 304 { 305 struct device_link *link; 306 307 if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS || 308 (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || 309 (flags & DL_FLAG_SYNC_STATE_ONLY && 310 flags != DL_FLAG_SYNC_STATE_ONLY) || 311 (flags & DL_FLAG_AUTOPROBE_CONSUMER && 312 flags & (DL_FLAG_AUTOREMOVE_CONSUMER | 313 DL_FLAG_AUTOREMOVE_SUPPLIER))) 314 return NULL; 315 316 if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { 317 if (pm_runtime_get_sync(supplier) < 0) { 318 pm_runtime_put_noidle(supplier); 319 return NULL; 320 } 321 } 322 323 if (!(flags & DL_FLAG_STATELESS)) 324 flags |= DL_FLAG_MANAGED; 325 326 device_links_write_lock(); 327 device_pm_lock(); 328 329 /* 330 * If the supplier has not been fully registered yet or there is a 331 * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and 332 * the supplier already in the graph, return NULL. If the link is a 333 * SYNC_STATE_ONLY link, we don't check for reverse dependencies 334 * because it only affects sync_state() callbacks. 335 */ 336 if (!device_pm_initialized(supplier) 337 || (!(flags & DL_FLAG_SYNC_STATE_ONLY) && 338 device_is_dependent(consumer, supplier))) { 339 link = NULL; 340 goto out; 341 } 342 343 /* 344 * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed 345 * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both 346 * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. 347 */ 348 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 349 flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; 350 351 list_for_each_entry(link, &supplier->links.consumers, s_node) { 352 if (link->consumer != consumer) 353 continue; 354 355 if (flags & DL_FLAG_PM_RUNTIME) { 356 if (!(link->flags & DL_FLAG_PM_RUNTIME)) { 357 pm_runtime_new_link(consumer); 358 link->flags |= DL_FLAG_PM_RUNTIME; 359 } 360 if (flags & DL_FLAG_RPM_ACTIVE) 361 refcount_inc(&link->rpm_active); 362 } 363 364 if (flags & DL_FLAG_STATELESS) { 365 kref_get(&link->kref); 366 if (link->flags & DL_FLAG_SYNC_STATE_ONLY && 367 !(link->flags & DL_FLAG_STATELESS)) { 368 link->flags |= DL_FLAG_STATELESS; 369 goto reorder; 370 } else { 371 link->flags |= DL_FLAG_STATELESS; 372 goto out; 373 } 374 } 375 376 /* 377 * If the life time of the link following from the new flags is 378 * longer than indicated by the flags of the existing link, 379 * update the existing link to stay around longer. 380 */ 381 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { 382 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { 383 link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; 384 link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; 385 } 386 } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { 387 link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | 388 DL_FLAG_AUTOREMOVE_SUPPLIER); 389 } 390 if (!(link->flags & DL_FLAG_MANAGED)) { 391 kref_get(&link->kref); 392 link->flags |= DL_FLAG_MANAGED; 393 device_link_init_status(link, consumer, supplier); 394 } 395 if (link->flags & DL_FLAG_SYNC_STATE_ONLY && 396 !(flags & DL_FLAG_SYNC_STATE_ONLY)) { 397 link->flags &= ~DL_FLAG_SYNC_STATE_ONLY; 398 goto reorder; 399 } 400 401 goto out; 402 } 403 404 link = kzalloc(sizeof(*link), GFP_KERNEL); 405 if (!link) 406 goto out; 407 408 refcount_set(&link->rpm_active, 1); 409 410 if (flags & DL_FLAG_PM_RUNTIME) { 411 if (flags & DL_FLAG_RPM_ACTIVE) 412 refcount_inc(&link->rpm_active); 413 414 pm_runtime_new_link(consumer); 415 } 416 417 get_device(supplier); 418 link->supplier = supplier; 419 INIT_LIST_HEAD(&link->s_node); 420 get_device(consumer); 421 link->consumer = consumer; 422 INIT_LIST_HEAD(&link->c_node); 423 link->flags = flags; 424 kref_init(&link->kref); 425 426 /* Determine the initial link state. */ 427 if (flags & DL_FLAG_STATELESS) 428 link->status = DL_STATE_NONE; 429 else 430 device_link_init_status(link, consumer, supplier); 431 432 /* 433 * Some callers expect the link creation during consumer driver probe to 434 * resume the supplier even without DL_FLAG_RPM_ACTIVE. 435 */ 436 if (link->status == DL_STATE_CONSUMER_PROBE && 437 flags & DL_FLAG_PM_RUNTIME) 438 pm_runtime_resume(supplier); 439 440 list_add_tail_rcu(&link->s_node, &supplier->links.consumers); 441 list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); 442 443 if (flags & DL_FLAG_SYNC_STATE_ONLY) { 444 dev_dbg(consumer, 445 "Linked as a sync state only consumer to %s\n", 446 dev_name(supplier)); 447 goto out; 448 } 449 450 reorder: 451 /* 452 * Move the consumer and all of the devices depending on it to the end 453 * of dpm_list and the devices_kset list. 454 * 455 * It is necessary to hold dpm_list locked throughout all that or else 456 * we may end up suspending with a wrong ordering of it. 457 */ 458 device_reorder_to_tail(consumer, NULL); 459 460 dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); 461 462 out: 463 device_pm_unlock(); 464 device_links_write_unlock(); 465 466 if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) 467 pm_runtime_put(supplier); 468 469 return link; 470 } 471 EXPORT_SYMBOL_GPL(device_link_add); 472 473 /** 474 * device_link_wait_for_supplier - Add device to wait_for_suppliers list 475 * @consumer: Consumer device 476 * 477 * Marks the @consumer device as waiting for suppliers to become available by 478 * adding it to the wait_for_suppliers list. The consumer device will never be 479 * probed until it's removed from the wait_for_suppliers list. 480 * 481 * The caller is responsible for adding the links to the supplier devices once 482 * they are available and removing the @consumer device from the 483 * wait_for_suppliers list once links to all the suppliers have been created. 484 * 485 * This function is NOT meant to be called from the probe function of the 486 * consumer but rather from code that creates/adds the consumer device. 487 */ 488 static void device_link_wait_for_supplier(struct device *consumer, 489 bool need_for_probe) 490 { 491 mutex_lock(&wfs_lock); 492 list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers); 493 consumer->links.need_for_probe = need_for_probe; 494 mutex_unlock(&wfs_lock); 495 } 496 497 static void device_link_wait_for_mandatory_supplier(struct device *consumer) 498 { 499 device_link_wait_for_supplier(consumer, true); 500 } 501 502 static void device_link_wait_for_optional_supplier(struct device *consumer) 503 { 504 device_link_wait_for_supplier(consumer, false); 505 } 506 507 /** 508 * device_link_add_missing_supplier_links - Add links from consumer devices to 509 * supplier devices, leaving any 510 * consumer with inactive suppliers on 511 * the wait_for_suppliers list 512 * 513 * Loops through all consumers waiting on suppliers and tries to add all their 514 * supplier links. If that succeeds, the consumer device is removed from 515 * wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers 516 * list. Devices left on the wait_for_suppliers list will not be probed. 517 * 518 * The fwnode add_links callback is expected to return 0 if it has found and 519 * added all the supplier links for the consumer device. It should return an 520 * error if it isn't able to do so. 521 * 522 * The caller of device_link_wait_for_supplier() is expected to call this once 523 * it's aware of potential suppliers becoming available. 524 */ 525 static void device_link_add_missing_supplier_links(void) 526 { 527 struct device *dev, *tmp; 528 529 mutex_lock(&wfs_lock); 530 list_for_each_entry_safe(dev, tmp, &wait_for_suppliers, 531 links.needs_suppliers) { 532 int ret = fwnode_call_int_op(dev->fwnode, add_links, dev); 533 if (!ret) 534 list_del_init(&dev->links.needs_suppliers); 535 else if (ret != -ENODEV || fw_devlink_is_permissive()) 536 dev->links.need_for_probe = false; 537 } 538 mutex_unlock(&wfs_lock); 539 } 540 541 static void device_link_free(struct device_link *link) 542 { 543 while (refcount_dec_not_one(&link->rpm_active)) 544 pm_runtime_put(link->supplier); 545 546 put_device(link->consumer); 547 put_device(link->supplier); 548 kfree(link); 549 } 550 551 #ifdef CONFIG_SRCU 552 static void __device_link_free_srcu(struct rcu_head *rhead) 553 { 554 device_link_free(container_of(rhead, struct device_link, rcu_head)); 555 } 556 557 static void __device_link_del(struct kref *kref) 558 { 559 struct device_link *link = container_of(kref, struct device_link, kref); 560 561 dev_dbg(link->consumer, "Dropping the link to %s\n", 562 dev_name(link->supplier)); 563 564 if (link->flags & DL_FLAG_PM_RUNTIME) 565 pm_runtime_drop_link(link->consumer); 566 567 list_del_rcu(&link->s_node); 568 list_del_rcu(&link->c_node); 569 call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); 570 } 571 #else /* !CONFIG_SRCU */ 572 static void __device_link_del(struct kref *kref) 573 { 574 struct device_link *link = container_of(kref, struct device_link, kref); 575 576 dev_info(link->consumer, "Dropping the link to %s\n", 577 dev_name(link->supplier)); 578 579 if (link->flags & DL_FLAG_PM_RUNTIME) 580 pm_runtime_drop_link(link->consumer); 581 582 list_del(&link->s_node); 583 list_del(&link->c_node); 584 device_link_free(link); 585 } 586 #endif /* !CONFIG_SRCU */ 587 588 static void device_link_put_kref(struct device_link *link) 589 { 590 if (link->flags & DL_FLAG_STATELESS) 591 kref_put(&link->kref, __device_link_del); 592 else 593 WARN(1, "Unable to drop a managed device link reference\n"); 594 } 595 596 /** 597 * device_link_del - Delete a stateless link between two devices. 598 * @link: Device link to delete. 599 * 600 * The caller must ensure proper synchronization of this function with runtime 601 * PM. If the link was added multiple times, it needs to be deleted as often. 602 * Care is required for hotplugged devices: Their links are purged on removal 603 * and calling device_link_del() is then no longer allowed. 604 */ 605 void device_link_del(struct device_link *link) 606 { 607 device_links_write_lock(); 608 device_pm_lock(); 609 device_link_put_kref(link); 610 device_pm_unlock(); 611 device_links_write_unlock(); 612 } 613 EXPORT_SYMBOL_GPL(device_link_del); 614 615 /** 616 * device_link_remove - Delete a stateless link between two devices. 617 * @consumer: Consumer end of the link. 618 * @supplier: Supplier end of the link. 619 * 620 * The caller must ensure proper synchronization of this function with runtime 621 * PM. 622 */ 623 void device_link_remove(void *consumer, struct device *supplier) 624 { 625 struct device_link *link; 626 627 if (WARN_ON(consumer == supplier)) 628 return; 629 630 device_links_write_lock(); 631 device_pm_lock(); 632 633 list_for_each_entry(link, &supplier->links.consumers, s_node) { 634 if (link->consumer == consumer) { 635 device_link_put_kref(link); 636 break; 637 } 638 } 639 640 device_pm_unlock(); 641 device_links_write_unlock(); 642 } 643 EXPORT_SYMBOL_GPL(device_link_remove); 644 645 static void device_links_missing_supplier(struct device *dev) 646 { 647 struct device_link *link; 648 649 list_for_each_entry(link, &dev->links.suppliers, c_node) { 650 if (link->status != DL_STATE_CONSUMER_PROBE) 651 continue; 652 653 if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { 654 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 655 } else { 656 WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); 657 WRITE_ONCE(link->status, DL_STATE_DORMANT); 658 } 659 } 660 } 661 662 /** 663 * device_links_check_suppliers - Check presence of supplier drivers. 664 * @dev: Consumer device. 665 * 666 * Check links from this device to any suppliers. Walk the list of the device's 667 * links to suppliers and see if all of them are available. If not, simply 668 * return -EPROBE_DEFER. 669 * 670 * We need to guarantee that the supplier will not go away after the check has 671 * been positive here. It only can go away in __device_release_driver() and 672 * that function checks the device's links to consumers. This means we need to 673 * mark the link as "consumer probe in progress" to make the supplier removal 674 * wait for us to complete (or bad things may happen). 675 * 676 * Links without the DL_FLAG_MANAGED flag set are ignored. 677 */ 678 int device_links_check_suppliers(struct device *dev) 679 { 680 struct device_link *link; 681 int ret = 0; 682 683 /* 684 * Device waiting for supplier to become available is not allowed to 685 * probe. 686 */ 687 mutex_lock(&wfs_lock); 688 if (!list_empty(&dev->links.needs_suppliers) && 689 dev->links.need_for_probe) { 690 mutex_unlock(&wfs_lock); 691 return -EPROBE_DEFER; 692 } 693 mutex_unlock(&wfs_lock); 694 695 device_links_write_lock(); 696 697 list_for_each_entry(link, &dev->links.suppliers, c_node) { 698 if (!(link->flags & DL_FLAG_MANAGED)) 699 continue; 700 701 if (link->status != DL_STATE_AVAILABLE && 702 !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { 703 device_links_missing_supplier(dev); 704 ret = -EPROBE_DEFER; 705 break; 706 } 707 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); 708 } 709 dev->links.status = DL_DEV_PROBING; 710 711 device_links_write_unlock(); 712 return ret; 713 } 714 715 /** 716 * __device_links_queue_sync_state - Queue a device for sync_state() callback 717 * @dev: Device to call sync_state() on 718 * @list: List head to queue the @dev on 719 * 720 * Queues a device for a sync_state() callback when the device links write lock 721 * isn't held. This allows the sync_state() execution flow to use device links 722 * APIs. The caller must ensure this function is called with 723 * device_links_write_lock() held. 724 * 725 * This function does a get_device() to make sure the device is not freed while 726 * on this list. 727 * 728 * So the caller must also ensure that device_links_flush_sync_list() is called 729 * as soon as the caller releases device_links_write_lock(). This is necessary 730 * to make sure the sync_state() is called in a timely fashion and the 731 * put_device() is called on this device. 732 */ 733 static void __device_links_queue_sync_state(struct device *dev, 734 struct list_head *list) 735 { 736 struct device_link *link; 737 738 if (!dev_has_sync_state(dev)) 739 return; 740 if (dev->state_synced) 741 return; 742 743 list_for_each_entry(link, &dev->links.consumers, s_node) { 744 if (!(link->flags & DL_FLAG_MANAGED)) 745 continue; 746 if (link->status != DL_STATE_ACTIVE) 747 return; 748 } 749 750 /* 751 * Set the flag here to avoid adding the same device to a list more 752 * than once. This can happen if new consumers get added to the device 753 * and probed before the list is flushed. 754 */ 755 dev->state_synced = true; 756 757 if (WARN_ON(!list_empty(&dev->links.defer_sync))) 758 return; 759 760 get_device(dev); 761 list_add_tail(&dev->links.defer_sync, list); 762 } 763 764 /** 765 * device_links_flush_sync_list - Call sync_state() on a list of devices 766 * @list: List of devices to call sync_state() on 767 * @dont_lock_dev: Device for which lock is already held by the caller 768 * 769 * Calls sync_state() on all the devices that have been queued for it. This 770 * function is used in conjunction with __device_links_queue_sync_state(). The 771 * @dont_lock_dev parameter is useful when this function is called from a 772 * context where a device lock is already held. 773 */ 774 static void device_links_flush_sync_list(struct list_head *list, 775 struct device *dont_lock_dev) 776 { 777 struct device *dev, *tmp; 778 779 list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { 780 list_del_init(&dev->links.defer_sync); 781 782 if (dev != dont_lock_dev) 783 device_lock(dev); 784 785 if (dev->bus->sync_state) 786 dev->bus->sync_state(dev); 787 else if (dev->driver && dev->driver->sync_state) 788 dev->driver->sync_state(dev); 789 790 if (dev != dont_lock_dev) 791 device_unlock(dev); 792 793 put_device(dev); 794 } 795 } 796 797 void device_links_supplier_sync_state_pause(void) 798 { 799 device_links_write_lock(); 800 defer_sync_state_count++; 801 device_links_write_unlock(); 802 } 803 804 void device_links_supplier_sync_state_resume(void) 805 { 806 struct device *dev, *tmp; 807 LIST_HEAD(sync_list); 808 809 device_links_write_lock(); 810 if (!defer_sync_state_count) { 811 WARN(true, "Unmatched sync_state pause/resume!"); 812 goto out; 813 } 814 defer_sync_state_count--; 815 if (defer_sync_state_count) 816 goto out; 817 818 list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { 819 /* 820 * Delete from deferred_sync list before queuing it to 821 * sync_list because defer_sync is used for both lists. 822 */ 823 list_del_init(&dev->links.defer_sync); 824 __device_links_queue_sync_state(dev, &sync_list); 825 } 826 out: 827 device_links_write_unlock(); 828 829 device_links_flush_sync_list(&sync_list, NULL); 830 } 831 832 static int sync_state_resume_initcall(void) 833 { 834 device_links_supplier_sync_state_resume(); 835 return 0; 836 } 837 late_initcall(sync_state_resume_initcall); 838 839 static void __device_links_supplier_defer_sync(struct device *sup) 840 { 841 if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) 842 list_add_tail(&sup->links.defer_sync, &deferred_sync); 843 } 844 845 static void device_link_drop_managed(struct device_link *link) 846 { 847 link->flags &= ~DL_FLAG_MANAGED; 848 WRITE_ONCE(link->status, DL_STATE_NONE); 849 kref_put(&link->kref, __device_link_del); 850 } 851 852 /** 853 * device_links_driver_bound - Update device links after probing its driver. 854 * @dev: Device to update the links for. 855 * 856 * The probe has been successful, so update links from this device to any 857 * consumers by changing their status to "available". 858 * 859 * Also change the status of @dev's links to suppliers to "active". 860 * 861 * Links without the DL_FLAG_MANAGED flag set are ignored. 862 */ 863 void device_links_driver_bound(struct device *dev) 864 { 865 struct device_link *link, *ln; 866 LIST_HEAD(sync_list); 867 868 /* 869 * If a device probes successfully, it's expected to have created all 870 * the device links it needs to or make new device links as it needs 871 * them. So, it no longer needs to wait on any suppliers. 872 */ 873 mutex_lock(&wfs_lock); 874 list_del_init(&dev->links.needs_suppliers); 875 mutex_unlock(&wfs_lock); 876 877 device_links_write_lock(); 878 879 list_for_each_entry(link, &dev->links.consumers, s_node) { 880 if (!(link->flags & DL_FLAG_MANAGED)) 881 continue; 882 883 /* 884 * Links created during consumer probe may be in the "consumer 885 * probe" state to start with if the supplier is still probing 886 * when they are created and they may become "active" if the 887 * consumer probe returns first. Skip them here. 888 */ 889 if (link->status == DL_STATE_CONSUMER_PROBE || 890 link->status == DL_STATE_ACTIVE) 891 continue; 892 893 WARN_ON(link->status != DL_STATE_DORMANT); 894 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 895 896 if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) 897 driver_deferred_probe_add(link->consumer); 898 } 899 900 if (defer_sync_state_count) 901 __device_links_supplier_defer_sync(dev); 902 else 903 __device_links_queue_sync_state(dev, &sync_list); 904 905 list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { 906 struct device *supplier; 907 908 if (!(link->flags & DL_FLAG_MANAGED)) 909 continue; 910 911 supplier = link->supplier; 912 if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { 913 /* 914 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no 915 * other DL_MANAGED_LINK_FLAGS have been set. So, it's 916 * save to drop the managed link completely. 917 */ 918 device_link_drop_managed(link); 919 } else { 920 WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); 921 WRITE_ONCE(link->status, DL_STATE_ACTIVE); 922 } 923 924 /* 925 * This needs to be done even for the deleted 926 * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last 927 * device link that was preventing the supplier from getting a 928 * sync_state() call. 929 */ 930 if (defer_sync_state_count) 931 __device_links_supplier_defer_sync(supplier); 932 else 933 __device_links_queue_sync_state(supplier, &sync_list); 934 } 935 936 dev->links.status = DL_DEV_DRIVER_BOUND; 937 938 device_links_write_unlock(); 939 940 device_links_flush_sync_list(&sync_list, dev); 941 } 942 943 /** 944 * __device_links_no_driver - Update links of a device without a driver. 945 * @dev: Device without a drvier. 946 * 947 * Delete all non-persistent links from this device to any suppliers. 948 * 949 * Persistent links stay around, but their status is changed to "available", 950 * unless they already are in the "supplier unbind in progress" state in which 951 * case they need not be updated. 952 * 953 * Links without the DL_FLAG_MANAGED flag set are ignored. 954 */ 955 static void __device_links_no_driver(struct device *dev) 956 { 957 struct device_link *link, *ln; 958 959 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 960 if (!(link->flags & DL_FLAG_MANAGED)) 961 continue; 962 963 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { 964 device_link_drop_managed(link); 965 continue; 966 } 967 968 if (link->status != DL_STATE_CONSUMER_PROBE && 969 link->status != DL_STATE_ACTIVE) 970 continue; 971 972 if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { 973 WRITE_ONCE(link->status, DL_STATE_AVAILABLE); 974 } else { 975 WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); 976 WRITE_ONCE(link->status, DL_STATE_DORMANT); 977 } 978 } 979 980 dev->links.status = DL_DEV_NO_DRIVER; 981 } 982 983 /** 984 * device_links_no_driver - Update links after failing driver probe. 985 * @dev: Device whose driver has just failed to probe. 986 * 987 * Clean up leftover links to consumers for @dev and invoke 988 * %__device_links_no_driver() to update links to suppliers for it as 989 * appropriate. 990 * 991 * Links without the DL_FLAG_MANAGED flag set are ignored. 992 */ 993 void device_links_no_driver(struct device *dev) 994 { 995 struct device_link *link; 996 997 device_links_write_lock(); 998 999 list_for_each_entry(link, &dev->links.consumers, s_node) { 1000 if (!(link->flags & DL_FLAG_MANAGED)) 1001 continue; 1002 1003 /* 1004 * The probe has failed, so if the status of the link is 1005 * "consumer probe" or "active", it must have been added by 1006 * a probing consumer while this device was still probing. 1007 * Change its state to "dormant", as it represents a valid 1008 * relationship, but it is not functionally meaningful. 1009 */ 1010 if (link->status == DL_STATE_CONSUMER_PROBE || 1011 link->status == DL_STATE_ACTIVE) 1012 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1013 } 1014 1015 __device_links_no_driver(dev); 1016 1017 device_links_write_unlock(); 1018 } 1019 1020 /** 1021 * device_links_driver_cleanup - Update links after driver removal. 1022 * @dev: Device whose driver has just gone away. 1023 * 1024 * Update links to consumers for @dev by changing their status to "dormant" and 1025 * invoke %__device_links_no_driver() to update links to suppliers for it as 1026 * appropriate. 1027 * 1028 * Links without the DL_FLAG_MANAGED flag set are ignored. 1029 */ 1030 void device_links_driver_cleanup(struct device *dev) 1031 { 1032 struct device_link *link, *ln; 1033 1034 device_links_write_lock(); 1035 1036 list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { 1037 if (!(link->flags & DL_FLAG_MANAGED)) 1038 continue; 1039 1040 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); 1041 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); 1042 1043 /* 1044 * autoremove the links between this @dev and its consumer 1045 * devices that are not active, i.e. where the link state 1046 * has moved to DL_STATE_SUPPLIER_UNBIND. 1047 */ 1048 if (link->status == DL_STATE_SUPPLIER_UNBIND && 1049 link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) 1050 device_link_drop_managed(link); 1051 1052 WRITE_ONCE(link->status, DL_STATE_DORMANT); 1053 } 1054 1055 list_del_init(&dev->links.defer_sync); 1056 __device_links_no_driver(dev); 1057 1058 device_links_write_unlock(); 1059 } 1060 1061 /** 1062 * device_links_busy - Check if there are any busy links to consumers. 1063 * @dev: Device to check. 1064 * 1065 * Check each consumer of the device and return 'true' if its link's status 1066 * is one of "consumer probe" or "active" (meaning that the given consumer is 1067 * probing right now or its driver is present). Otherwise, change the link 1068 * state to "supplier unbind" to prevent the consumer from being probed 1069 * successfully going forward. 1070 * 1071 * Return 'false' if there are no probing or active consumers. 1072 * 1073 * Links without the DL_FLAG_MANAGED flag set are ignored. 1074 */ 1075 bool device_links_busy(struct device *dev) 1076 { 1077 struct device_link *link; 1078 bool ret = false; 1079 1080 device_links_write_lock(); 1081 1082 list_for_each_entry(link, &dev->links.consumers, s_node) { 1083 if (!(link->flags & DL_FLAG_MANAGED)) 1084 continue; 1085 1086 if (link->status == DL_STATE_CONSUMER_PROBE 1087 || link->status == DL_STATE_ACTIVE) { 1088 ret = true; 1089 break; 1090 } 1091 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 1092 } 1093 1094 dev->links.status = DL_DEV_UNBINDING; 1095 1096 device_links_write_unlock(); 1097 return ret; 1098 } 1099 1100 /** 1101 * device_links_unbind_consumers - Force unbind consumers of the given device. 1102 * @dev: Device to unbind the consumers of. 1103 * 1104 * Walk the list of links to consumers for @dev and if any of them is in the 1105 * "consumer probe" state, wait for all device probes in progress to complete 1106 * and start over. 1107 * 1108 * If that's not the case, change the status of the link to "supplier unbind" 1109 * and check if the link was in the "active" state. If so, force the consumer 1110 * driver to unbind and start over (the consumer will not re-probe as we have 1111 * changed the state of the link already). 1112 * 1113 * Links without the DL_FLAG_MANAGED flag set are ignored. 1114 */ 1115 void device_links_unbind_consumers(struct device *dev) 1116 { 1117 struct device_link *link; 1118 1119 start: 1120 device_links_write_lock(); 1121 1122 list_for_each_entry(link, &dev->links.consumers, s_node) { 1123 enum device_link_state status; 1124 1125 if (!(link->flags & DL_FLAG_MANAGED) || 1126 link->flags & DL_FLAG_SYNC_STATE_ONLY) 1127 continue; 1128 1129 status = link->status; 1130 if (status == DL_STATE_CONSUMER_PROBE) { 1131 device_links_write_unlock(); 1132 1133 wait_for_device_probe(); 1134 goto start; 1135 } 1136 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); 1137 if (status == DL_STATE_ACTIVE) { 1138 struct device *consumer = link->consumer; 1139 1140 get_device(consumer); 1141 1142 device_links_write_unlock(); 1143 1144 device_release_driver_internal(consumer, NULL, 1145 consumer->parent); 1146 put_device(consumer); 1147 goto start; 1148 } 1149 } 1150 1151 device_links_write_unlock(); 1152 } 1153 1154 /** 1155 * device_links_purge - Delete existing links to other devices. 1156 * @dev: Target device. 1157 */ 1158 static void device_links_purge(struct device *dev) 1159 { 1160 struct device_link *link, *ln; 1161 1162 mutex_lock(&wfs_lock); 1163 list_del(&dev->links.needs_suppliers); 1164 mutex_unlock(&wfs_lock); 1165 1166 /* 1167 * Delete all of the remaining links from this device to any other 1168 * devices (either consumers or suppliers). 1169 */ 1170 device_links_write_lock(); 1171 1172 list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { 1173 WARN_ON(link->status == DL_STATE_ACTIVE); 1174 __device_link_del(&link->kref); 1175 } 1176 1177 list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { 1178 WARN_ON(link->status != DL_STATE_DORMANT && 1179 link->status != DL_STATE_NONE); 1180 __device_link_del(&link->kref); 1181 } 1182 1183 device_links_write_unlock(); 1184 } 1185 1186 static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; 1187 static int __init fw_devlink_setup(char *arg) 1188 { 1189 if (!arg) 1190 return -EINVAL; 1191 1192 if (strcmp(arg, "off") == 0) { 1193 fw_devlink_flags = 0; 1194 } else if (strcmp(arg, "permissive") == 0) { 1195 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; 1196 } else if (strcmp(arg, "on") == 0) { 1197 fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER; 1198 } else if (strcmp(arg, "rpm") == 0) { 1199 fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER | 1200 DL_FLAG_PM_RUNTIME; 1201 } 1202 return 0; 1203 } 1204 early_param("fw_devlink", fw_devlink_setup); 1205 1206 u32 fw_devlink_get_flags(void) 1207 { 1208 return fw_devlink_flags; 1209 } 1210 1211 static bool fw_devlink_is_permissive(void) 1212 { 1213 return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY; 1214 } 1215 1216 static void fw_devlink_link_device(struct device *dev) 1217 { 1218 int fw_ret; 1219 1220 if (!fw_devlink_flags) 1221 return; 1222 1223 mutex_lock(&defer_fw_devlink_lock); 1224 if (!defer_fw_devlink_count) 1225 device_link_add_missing_supplier_links(); 1226 1227 /* 1228 * The device's fwnode not having add_links() doesn't affect if other 1229 * consumers can find this device as a supplier. So, this check is 1230 * intentionally placed after device_link_add_missing_supplier_links(). 1231 */ 1232 if (!fwnode_has_op(dev->fwnode, add_links)) 1233 goto out; 1234 1235 /* 1236 * If fw_devlink is being deferred, assume all devices have mandatory 1237 * suppliers they need to link to later. Then, when the fw_devlink is 1238 * resumed, all these devices will get a chance to try and link to any 1239 * suppliers they have. 1240 */ 1241 if (!defer_fw_devlink_count) { 1242 fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev); 1243 if (fw_ret == -ENODEV && fw_devlink_is_permissive()) 1244 fw_ret = -EAGAIN; 1245 } else { 1246 fw_ret = -ENODEV; 1247 } 1248 1249 if (fw_ret == -ENODEV) 1250 device_link_wait_for_mandatory_supplier(dev); 1251 else if (fw_ret) 1252 device_link_wait_for_optional_supplier(dev); 1253 1254 out: 1255 mutex_unlock(&defer_fw_devlink_lock); 1256 } 1257 1258 /** 1259 * fw_devlink_pause - Pause parsing of fwnode to create device links 1260 * 1261 * Calling this function defers any fwnode parsing to create device links until 1262 * fw_devlink_resume() is called. Both these functions are ref counted and the 1263 * caller needs to match the calls. 1264 * 1265 * While fw_devlink is paused: 1266 * - Any device that is added won't have its fwnode parsed to create device 1267 * links. 1268 * - The probe of the device will also be deferred during this period. 1269 * - Any devices that were already added, but waiting for suppliers won't be 1270 * able to link to newly added devices. 1271 * 1272 * Once fw_devlink_resume(): 1273 * - All the fwnodes that was not parsed will be parsed. 1274 * - All the devices that were deferred probing will be reattempted if they 1275 * aren't waiting for any more suppliers. 1276 * 1277 * This pair of functions, is mainly meant to optimize the parsing of fwnodes 1278 * when a lot of devices that need to link to each other are added in a short 1279 * interval of time. For example, adding all the top level devices in a system. 1280 * 1281 * For example, if N devices are added and: 1282 * - All the consumers are added before their suppliers 1283 * - All the suppliers of the N devices are part of the N devices 1284 * 1285 * Then: 1286 * 1287 * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device 1288 * will only need one parsing of its fwnode because it is guaranteed to find 1289 * all the supplier devices already registered and ready to link to. It won't 1290 * have to do another pass later to find one or more suppliers it couldn't 1291 * find in the first parse of the fwnode. So, we'll only need O(N) fwnode 1292 * parses. 1293 * 1294 * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would 1295 * end up doing O(N^2) parses of fwnodes because every device that's added is 1296 * guaranteed to trigger a parse of the fwnode of every device added before 1297 * it. This O(N^2) parse is made worse by the fact that when a fwnode of a 1298 * device is parsed, all it descendant devices might need to have their 1299 * fwnodes parsed too (even if the devices themselves aren't added). 1300 */ 1301 void fw_devlink_pause(void) 1302 { 1303 mutex_lock(&defer_fw_devlink_lock); 1304 defer_fw_devlink_count++; 1305 mutex_unlock(&defer_fw_devlink_lock); 1306 } 1307 1308 /** fw_devlink_resume - Resume parsing of fwnode to create device links 1309 * 1310 * This function is used in conjunction with fw_devlink_pause() and is ref 1311 * counted. See documentation for fw_devlink_pause() for more details. 1312 */ 1313 void fw_devlink_resume(void) 1314 { 1315 mutex_lock(&defer_fw_devlink_lock); 1316 if (!defer_fw_devlink_count) { 1317 WARN(true, "Unmatched fw_devlink pause/resume!"); 1318 goto out; 1319 } 1320 1321 defer_fw_devlink_count--; 1322 if (defer_fw_devlink_count) 1323 goto out; 1324 1325 device_link_add_missing_supplier_links(); 1326 driver_deferred_probe_force_trigger(); 1327 out: 1328 mutex_unlock(&defer_fw_devlink_lock); 1329 } 1330 /* Device links support end. */ 1331 1332 int (*platform_notify)(struct device *dev) = NULL; 1333 int (*platform_notify_remove)(struct device *dev) = NULL; 1334 static struct kobject *dev_kobj; 1335 struct kobject *sysfs_dev_char_kobj; 1336 struct kobject *sysfs_dev_block_kobj; 1337 1338 static DEFINE_MUTEX(device_hotplug_lock); 1339 1340 void lock_device_hotplug(void) 1341 { 1342 mutex_lock(&device_hotplug_lock); 1343 } 1344 1345 void unlock_device_hotplug(void) 1346 { 1347 mutex_unlock(&device_hotplug_lock); 1348 } 1349 1350 int lock_device_hotplug_sysfs(void) 1351 { 1352 if (mutex_trylock(&device_hotplug_lock)) 1353 return 0; 1354 1355 /* Avoid busy looping (5 ms of sleep should do). */ 1356 msleep(5); 1357 return restart_syscall(); 1358 } 1359 1360 #ifdef CONFIG_BLOCK 1361 static inline int device_is_not_partition(struct device *dev) 1362 { 1363 return !(dev->type == &part_type); 1364 } 1365 #else 1366 static inline int device_is_not_partition(struct device *dev) 1367 { 1368 return 1; 1369 } 1370 #endif 1371 1372 static int 1373 device_platform_notify(struct device *dev, enum kobject_action action) 1374 { 1375 int ret; 1376 1377 ret = acpi_platform_notify(dev, action); 1378 if (ret) 1379 return ret; 1380 1381 ret = software_node_notify(dev, action); 1382 if (ret) 1383 return ret; 1384 1385 if (platform_notify && action == KOBJ_ADD) 1386 platform_notify(dev); 1387 else if (platform_notify_remove && action == KOBJ_REMOVE) 1388 platform_notify_remove(dev); 1389 return 0; 1390 } 1391 1392 /** 1393 * dev_driver_string - Return a device's driver name, if at all possible 1394 * @dev: struct device to get the name of 1395 * 1396 * Will return the device's driver's name if it is bound to a device. If 1397 * the device is not bound to a driver, it will return the name of the bus 1398 * it is attached to. If it is not attached to a bus either, an empty 1399 * string will be returned. 1400 */ 1401 const char *dev_driver_string(const struct device *dev) 1402 { 1403 struct device_driver *drv; 1404 1405 /* dev->driver can change to NULL underneath us because of unbinding, 1406 * so be careful about accessing it. dev->bus and dev->class should 1407 * never change once they are set, so they don't need special care. 1408 */ 1409 drv = READ_ONCE(dev->driver); 1410 return drv ? drv->name : 1411 (dev->bus ? dev->bus->name : 1412 (dev->class ? dev->class->name : "")); 1413 } 1414 EXPORT_SYMBOL(dev_driver_string); 1415 1416 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) 1417 1418 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, 1419 char *buf) 1420 { 1421 struct device_attribute *dev_attr = to_dev_attr(attr); 1422 struct device *dev = kobj_to_dev(kobj); 1423 ssize_t ret = -EIO; 1424 1425 if (dev_attr->show) 1426 ret = dev_attr->show(dev, dev_attr, buf); 1427 if (ret >= (ssize_t)PAGE_SIZE) { 1428 printk("dev_attr_show: %pS returned bad count\n", 1429 dev_attr->show); 1430 } 1431 return ret; 1432 } 1433 1434 static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, 1435 const char *buf, size_t count) 1436 { 1437 struct device_attribute *dev_attr = to_dev_attr(attr); 1438 struct device *dev = kobj_to_dev(kobj); 1439 ssize_t ret = -EIO; 1440 1441 if (dev_attr->store) 1442 ret = dev_attr->store(dev, dev_attr, buf, count); 1443 return ret; 1444 } 1445 1446 static const struct sysfs_ops dev_sysfs_ops = { 1447 .show = dev_attr_show, 1448 .store = dev_attr_store, 1449 }; 1450 1451 #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) 1452 1453 ssize_t device_store_ulong(struct device *dev, 1454 struct device_attribute *attr, 1455 const char *buf, size_t size) 1456 { 1457 struct dev_ext_attribute *ea = to_ext_attr(attr); 1458 int ret; 1459 unsigned long new; 1460 1461 ret = kstrtoul(buf, 0, &new); 1462 if (ret) 1463 return ret; 1464 *(unsigned long *)(ea->var) = new; 1465 /* Always return full write size even if we didn't consume all */ 1466 return size; 1467 } 1468 EXPORT_SYMBOL_GPL(device_store_ulong); 1469 1470 ssize_t device_show_ulong(struct device *dev, 1471 struct device_attribute *attr, 1472 char *buf) 1473 { 1474 struct dev_ext_attribute *ea = to_ext_attr(attr); 1475 return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var)); 1476 } 1477 EXPORT_SYMBOL_GPL(device_show_ulong); 1478 1479 ssize_t device_store_int(struct device *dev, 1480 struct device_attribute *attr, 1481 const char *buf, size_t size) 1482 { 1483 struct dev_ext_attribute *ea = to_ext_attr(attr); 1484 int ret; 1485 long new; 1486 1487 ret = kstrtol(buf, 0, &new); 1488 if (ret) 1489 return ret; 1490 1491 if (new > INT_MAX || new < INT_MIN) 1492 return -EINVAL; 1493 *(int *)(ea->var) = new; 1494 /* Always return full write size even if we didn't consume all */ 1495 return size; 1496 } 1497 EXPORT_SYMBOL_GPL(device_store_int); 1498 1499 ssize_t device_show_int(struct device *dev, 1500 struct device_attribute *attr, 1501 char *buf) 1502 { 1503 struct dev_ext_attribute *ea = to_ext_attr(attr); 1504 1505 return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var)); 1506 } 1507 EXPORT_SYMBOL_GPL(device_show_int); 1508 1509 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, 1510 const char *buf, size_t size) 1511 { 1512 struct dev_ext_attribute *ea = to_ext_attr(attr); 1513 1514 if (strtobool(buf, ea->var) < 0) 1515 return -EINVAL; 1516 1517 return size; 1518 } 1519 EXPORT_SYMBOL_GPL(device_store_bool); 1520 1521 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, 1522 char *buf) 1523 { 1524 struct dev_ext_attribute *ea = to_ext_attr(attr); 1525 1526 return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var)); 1527 } 1528 EXPORT_SYMBOL_GPL(device_show_bool); 1529 1530 /** 1531 * device_release - free device structure. 1532 * @kobj: device's kobject. 1533 * 1534 * This is called once the reference count for the object 1535 * reaches 0. We forward the call to the device's release 1536 * method, which should handle actually freeing the structure. 1537 */ 1538 static void device_release(struct kobject *kobj) 1539 { 1540 struct device *dev = kobj_to_dev(kobj); 1541 struct device_private *p = dev->p; 1542 1543 /* 1544 * Some platform devices are driven without driver attached 1545 * and managed resources may have been acquired. Make sure 1546 * all resources are released. 1547 * 1548 * Drivers still can add resources into device after device 1549 * is deleted but alive, so release devres here to avoid 1550 * possible memory leak. 1551 */ 1552 devres_release_all(dev); 1553 1554 if (dev->release) 1555 dev->release(dev); 1556 else if (dev->type && dev->type->release) 1557 dev->type->release(dev); 1558 else if (dev->class && dev->class->dev_release) 1559 dev->class->dev_release(dev); 1560 else 1561 WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", 1562 dev_name(dev)); 1563 kfree(p); 1564 } 1565 1566 static const void *device_namespace(struct kobject *kobj) 1567 { 1568 struct device *dev = kobj_to_dev(kobj); 1569 const void *ns = NULL; 1570 1571 if (dev->class && dev->class->ns_type) 1572 ns = dev->class->namespace(dev); 1573 1574 return ns; 1575 } 1576 1577 static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) 1578 { 1579 struct device *dev = kobj_to_dev(kobj); 1580 1581 if (dev->class && dev->class->get_ownership) 1582 dev->class->get_ownership(dev, uid, gid); 1583 } 1584 1585 static struct kobj_type device_ktype = { 1586 .release = device_release, 1587 .sysfs_ops = &dev_sysfs_ops, 1588 .namespace = device_namespace, 1589 .get_ownership = device_get_ownership, 1590 }; 1591 1592 1593 static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) 1594 { 1595 struct kobj_type *ktype = get_ktype(kobj); 1596 1597 if (ktype == &device_ktype) { 1598 struct device *dev = kobj_to_dev(kobj); 1599 if (dev->bus) 1600 return 1; 1601 if (dev->class) 1602 return 1; 1603 } 1604 return 0; 1605 } 1606 1607 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) 1608 { 1609 struct device *dev = kobj_to_dev(kobj); 1610 1611 if (dev->bus) 1612 return dev->bus->name; 1613 if (dev->class) 1614 return dev->class->name; 1615 return NULL; 1616 } 1617 1618 static int dev_uevent(struct kset *kset, struct kobject *kobj, 1619 struct kobj_uevent_env *env) 1620 { 1621 struct device *dev = kobj_to_dev(kobj); 1622 int retval = 0; 1623 1624 /* add device node properties if present */ 1625 if (MAJOR(dev->devt)) { 1626 const char *tmp; 1627 const char *name; 1628 umode_t mode = 0; 1629 kuid_t uid = GLOBAL_ROOT_UID; 1630 kgid_t gid = GLOBAL_ROOT_GID; 1631 1632 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); 1633 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); 1634 name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); 1635 if (name) { 1636 add_uevent_var(env, "DEVNAME=%s", name); 1637 if (mode) 1638 add_uevent_var(env, "DEVMODE=%#o", mode & 0777); 1639 if (!uid_eq(uid, GLOBAL_ROOT_UID)) 1640 add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); 1641 if (!gid_eq(gid, GLOBAL_ROOT_GID)) 1642 add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); 1643 kfree(tmp); 1644 } 1645 } 1646 1647 if (dev->type && dev->type->name) 1648 add_uevent_var(env, "DEVTYPE=%s", dev->type->name); 1649 1650 if (dev->driver) 1651 add_uevent_var(env, "DRIVER=%s", dev->driver->name); 1652 1653 /* Add common DT information about the device */ 1654 of_device_uevent(dev, env); 1655 1656 /* have the bus specific function add its stuff */ 1657 if (dev->bus && dev->bus->uevent) { 1658 retval = dev->bus->uevent(dev, env); 1659 if (retval) 1660 pr_debug("device: '%s': %s: bus uevent() returned %d\n", 1661 dev_name(dev), __func__, retval); 1662 } 1663 1664 /* have the class specific function add its stuff */ 1665 if (dev->class && dev->class->dev_uevent) { 1666 retval = dev->class->dev_uevent(dev, env); 1667 if (retval) 1668 pr_debug("device: '%s': %s: class uevent() " 1669 "returned %d\n", dev_name(dev), 1670 __func__, retval); 1671 } 1672 1673 /* have the device type specific function add its stuff */ 1674 if (dev->type && dev->type->uevent) { 1675 retval = dev->type->uevent(dev, env); 1676 if (retval) 1677 pr_debug("device: '%s': %s: dev_type uevent() " 1678 "returned %d\n", dev_name(dev), 1679 __func__, retval); 1680 } 1681 1682 return retval; 1683 } 1684 1685 static const struct kset_uevent_ops device_uevent_ops = { 1686 .filter = dev_uevent_filter, 1687 .name = dev_uevent_name, 1688 .uevent = dev_uevent, 1689 }; 1690 1691 static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, 1692 char *buf) 1693 { 1694 struct kobject *top_kobj; 1695 struct kset *kset; 1696 struct kobj_uevent_env *env = NULL; 1697 int i; 1698 size_t count = 0; 1699 int retval; 1700 1701 /* search the kset, the device belongs to */ 1702 top_kobj = &dev->kobj; 1703 while (!top_kobj->kset && top_kobj->parent) 1704 top_kobj = top_kobj->parent; 1705 if (!top_kobj->kset) 1706 goto out; 1707 1708 kset = top_kobj->kset; 1709 if (!kset->uevent_ops || !kset->uevent_ops->uevent) 1710 goto out; 1711 1712 /* respect filter */ 1713 if (kset->uevent_ops && kset->uevent_ops->filter) 1714 if (!kset->uevent_ops->filter(kset, &dev->kobj)) 1715 goto out; 1716 1717 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); 1718 if (!env) 1719 return -ENOMEM; 1720 1721 /* let the kset specific function add its keys */ 1722 retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); 1723 if (retval) 1724 goto out; 1725 1726 /* copy keys to file */ 1727 for (i = 0; i < env->envp_idx; i++) 1728 count += sprintf(&buf[count], "%s\n", env->envp[i]); 1729 out: 1730 kfree(env); 1731 return count; 1732 } 1733 1734 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, 1735 const char *buf, size_t count) 1736 { 1737 int rc; 1738 1739 rc = kobject_synth_uevent(&dev->kobj, buf, count); 1740 1741 if (rc) { 1742 dev_err(dev, "uevent: failed to send synthetic uevent\n"); 1743 return rc; 1744 } 1745 1746 return count; 1747 } 1748 static DEVICE_ATTR_RW(uevent); 1749 1750 static ssize_t online_show(struct device *dev, struct device_attribute *attr, 1751 char *buf) 1752 { 1753 bool val; 1754 1755 device_lock(dev); 1756 val = !dev->offline; 1757 device_unlock(dev); 1758 return sprintf(buf, "%u\n", val); 1759 } 1760 1761 static ssize_t online_store(struct device *dev, struct device_attribute *attr, 1762 const char *buf, size_t count) 1763 { 1764 bool val; 1765 int ret; 1766 1767 ret = strtobool(buf, &val); 1768 if (ret < 0) 1769 return ret; 1770 1771 ret = lock_device_hotplug_sysfs(); 1772 if (ret) 1773 return ret; 1774 1775 ret = val ? device_online(dev) : device_offline(dev); 1776 unlock_device_hotplug(); 1777 return ret < 0 ? ret : count; 1778 } 1779 static DEVICE_ATTR_RW(online); 1780 1781 int device_add_groups(struct device *dev, const struct attribute_group **groups) 1782 { 1783 return sysfs_create_groups(&dev->kobj, groups); 1784 } 1785 EXPORT_SYMBOL_GPL(device_add_groups); 1786 1787 void device_remove_groups(struct device *dev, 1788 const struct attribute_group **groups) 1789 { 1790 sysfs_remove_groups(&dev->kobj, groups); 1791 } 1792 EXPORT_SYMBOL_GPL(device_remove_groups); 1793 1794 union device_attr_group_devres { 1795 const struct attribute_group *group; 1796 const struct attribute_group **groups; 1797 }; 1798 1799 static int devm_attr_group_match(struct device *dev, void *res, void *data) 1800 { 1801 return ((union device_attr_group_devres *)res)->group == data; 1802 } 1803 1804 static void devm_attr_group_remove(struct device *dev, void *res) 1805 { 1806 union device_attr_group_devres *devres = res; 1807 const struct attribute_group *group = devres->group; 1808 1809 dev_dbg(dev, "%s: removing group %p\n", __func__, group); 1810 sysfs_remove_group(&dev->kobj, group); 1811 } 1812 1813 static void devm_attr_groups_remove(struct device *dev, void *res) 1814 { 1815 union device_attr_group_devres *devres = res; 1816 const struct attribute_group **groups = devres->groups; 1817 1818 dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); 1819 sysfs_remove_groups(&dev->kobj, groups); 1820 } 1821 1822 /** 1823 * devm_device_add_group - given a device, create a managed attribute group 1824 * @dev: The device to create the group for 1825 * @grp: The attribute group to create 1826 * 1827 * This function creates a group for the first time. It will explicitly 1828 * warn and error if any of the attribute files being created already exist. 1829 * 1830 * Returns 0 on success or error code on failure. 1831 */ 1832 int devm_device_add_group(struct device *dev, const struct attribute_group *grp) 1833 { 1834 union device_attr_group_devres *devres; 1835 int error; 1836 1837 devres = devres_alloc(devm_attr_group_remove, 1838 sizeof(*devres), GFP_KERNEL); 1839 if (!devres) 1840 return -ENOMEM; 1841 1842 error = sysfs_create_group(&dev->kobj, grp); 1843 if (error) { 1844 devres_free(devres); 1845 return error; 1846 } 1847 1848 devres->group = grp; 1849 devres_add(dev, devres); 1850 return 0; 1851 } 1852 EXPORT_SYMBOL_GPL(devm_device_add_group); 1853 1854 /** 1855 * devm_device_remove_group: remove a managed group from a device 1856 * @dev: device to remove the group from 1857 * @grp: group to remove 1858 * 1859 * This function removes a group of attributes from a device. The attributes 1860 * previously have to have been created for this group, otherwise it will fail. 1861 */ 1862 void devm_device_remove_group(struct device *dev, 1863 const struct attribute_group *grp) 1864 { 1865 WARN_ON(devres_release(dev, devm_attr_group_remove, 1866 devm_attr_group_match, 1867 /* cast away const */ (void *)grp)); 1868 } 1869 EXPORT_SYMBOL_GPL(devm_device_remove_group); 1870 1871 /** 1872 * devm_device_add_groups - create a bunch of managed attribute groups 1873 * @dev: The device to create the group for 1874 * @groups: The attribute groups to create, NULL terminated 1875 * 1876 * This function creates a bunch of managed attribute groups. If an error 1877 * occurs when creating a group, all previously created groups will be 1878 * removed, unwinding everything back to the original state when this 1879 * function was called. It will explicitly warn and error if any of the 1880 * attribute files being created already exist. 1881 * 1882 * Returns 0 on success or error code from sysfs_create_group on failure. 1883 */ 1884 int devm_device_add_groups(struct device *dev, 1885 const struct attribute_group **groups) 1886 { 1887 union device_attr_group_devres *devres; 1888 int error; 1889 1890 devres = devres_alloc(devm_attr_groups_remove, 1891 sizeof(*devres), GFP_KERNEL); 1892 if (!devres) 1893 return -ENOMEM; 1894 1895 error = sysfs_create_groups(&dev->kobj, groups); 1896 if (error) { 1897 devres_free(devres); 1898 return error; 1899 } 1900 1901 devres->groups = groups; 1902 devres_add(dev, devres); 1903 return 0; 1904 } 1905 EXPORT_SYMBOL_GPL(devm_device_add_groups); 1906 1907 /** 1908 * devm_device_remove_groups - remove a list of managed groups 1909 * 1910 * @dev: The device for the groups to be removed from 1911 * @groups: NULL terminated list of groups to be removed 1912 * 1913 * If groups is not NULL, remove the specified groups from the device. 1914 */ 1915 void devm_device_remove_groups(struct device *dev, 1916 const struct attribute_group **groups) 1917 { 1918 WARN_ON(devres_release(dev, devm_attr_groups_remove, 1919 devm_attr_group_match, 1920 /* cast away const */ (void *)groups)); 1921 } 1922 EXPORT_SYMBOL_GPL(devm_device_remove_groups); 1923 1924 static int device_add_attrs(struct device *dev) 1925 { 1926 struct class *class = dev->class; 1927 const struct device_type *type = dev->type; 1928 int error; 1929 1930 if (class) { 1931 error = device_add_groups(dev, class->dev_groups); 1932 if (error) 1933 return error; 1934 } 1935 1936 if (type) { 1937 error = device_add_groups(dev, type->groups); 1938 if (error) 1939 goto err_remove_class_groups; 1940 } 1941 1942 error = device_add_groups(dev, dev->groups); 1943 if (error) 1944 goto err_remove_type_groups; 1945 1946 if (device_supports_offline(dev) && !dev->offline_disabled) { 1947 error = device_create_file(dev, &dev_attr_online); 1948 if (error) 1949 goto err_remove_dev_groups; 1950 } 1951 1952 return 0; 1953 1954 err_remove_dev_groups: 1955 device_remove_groups(dev, dev->groups); 1956 err_remove_type_groups: 1957 if (type) 1958 device_remove_groups(dev, type->groups); 1959 err_remove_class_groups: 1960 if (class) 1961 device_remove_groups(dev, class->dev_groups); 1962 1963 return error; 1964 } 1965 1966 static void device_remove_attrs(struct device *dev) 1967 { 1968 struct class *class = dev->class; 1969 const struct device_type *type = dev->type; 1970 1971 device_remove_file(dev, &dev_attr_online); 1972 device_remove_groups(dev, dev->groups); 1973 1974 if (type) 1975 device_remove_groups(dev, type->groups); 1976 1977 if (class) 1978 device_remove_groups(dev, class->dev_groups); 1979 } 1980 1981 static ssize_t dev_show(struct device *dev, struct device_attribute *attr, 1982 char *buf) 1983 { 1984 return print_dev_t(buf, dev->devt); 1985 } 1986 static DEVICE_ATTR_RO(dev); 1987 1988 /* /sys/devices/ */ 1989 struct kset *devices_kset; 1990 1991 /** 1992 * devices_kset_move_before - Move device in the devices_kset's list. 1993 * @deva: Device to move. 1994 * @devb: Device @deva should come before. 1995 */ 1996 static void devices_kset_move_before(struct device *deva, struct device *devb) 1997 { 1998 if (!devices_kset) 1999 return; 2000 pr_debug("devices_kset: Moving %s before %s\n", 2001 dev_name(deva), dev_name(devb)); 2002 spin_lock(&devices_kset->list_lock); 2003 list_move_tail(&deva->kobj.entry, &devb->kobj.entry); 2004 spin_unlock(&devices_kset->list_lock); 2005 } 2006 2007 /** 2008 * devices_kset_move_after - Move device in the devices_kset's list. 2009 * @deva: Device to move 2010 * @devb: Device @deva should come after. 2011 */ 2012 static void devices_kset_move_after(struct device *deva, struct device *devb) 2013 { 2014 if (!devices_kset) 2015 return; 2016 pr_debug("devices_kset: Moving %s after %s\n", 2017 dev_name(deva), dev_name(devb)); 2018 spin_lock(&devices_kset->list_lock); 2019 list_move(&deva->kobj.entry, &devb->kobj.entry); 2020 spin_unlock(&devices_kset->list_lock); 2021 } 2022 2023 /** 2024 * devices_kset_move_last - move the device to the end of devices_kset's list. 2025 * @dev: device to move 2026 */ 2027 void devices_kset_move_last(struct device *dev) 2028 { 2029 if (!devices_kset) 2030 return; 2031 pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); 2032 spin_lock(&devices_kset->list_lock); 2033 list_move_tail(&dev->kobj.entry, &devices_kset->list); 2034 spin_unlock(&devices_kset->list_lock); 2035 } 2036 2037 /** 2038 * device_create_file - create sysfs attribute file for device. 2039 * @dev: device. 2040 * @attr: device attribute descriptor. 2041 */ 2042 int device_create_file(struct device *dev, 2043 const struct device_attribute *attr) 2044 { 2045 int error = 0; 2046 2047 if (dev) { 2048 WARN(((attr->attr.mode & S_IWUGO) && !attr->store), 2049 "Attribute %s: write permission without 'store'\n", 2050 attr->attr.name); 2051 WARN(((attr->attr.mode & S_IRUGO) && !attr->show), 2052 "Attribute %s: read permission without 'show'\n", 2053 attr->attr.name); 2054 error = sysfs_create_file(&dev->kobj, &attr->attr); 2055 } 2056 2057 return error; 2058 } 2059 EXPORT_SYMBOL_GPL(device_create_file); 2060 2061 /** 2062 * device_remove_file - remove sysfs attribute file. 2063 * @dev: device. 2064 * @attr: device attribute descriptor. 2065 */ 2066 void device_remove_file(struct device *dev, 2067 const struct device_attribute *attr) 2068 { 2069 if (dev) 2070 sysfs_remove_file(&dev->kobj, &attr->attr); 2071 } 2072 EXPORT_SYMBOL_GPL(device_remove_file); 2073 2074 /** 2075 * device_remove_file_self - remove sysfs attribute file from its own method. 2076 * @dev: device. 2077 * @attr: device attribute descriptor. 2078 * 2079 * See kernfs_remove_self() for details. 2080 */ 2081 bool device_remove_file_self(struct device *dev, 2082 const struct device_attribute *attr) 2083 { 2084 if (dev) 2085 return sysfs_remove_file_self(&dev->kobj, &attr->attr); 2086 else 2087 return false; 2088 } 2089 EXPORT_SYMBOL_GPL(device_remove_file_self); 2090 2091 /** 2092 * device_create_bin_file - create sysfs binary attribute file for device. 2093 * @dev: device. 2094 * @attr: device binary attribute descriptor. 2095 */ 2096 int device_create_bin_file(struct device *dev, 2097 const struct bin_attribute *attr) 2098 { 2099 int error = -EINVAL; 2100 if (dev) 2101 error = sysfs_create_bin_file(&dev->kobj, attr); 2102 return error; 2103 } 2104 EXPORT_SYMBOL_GPL(device_create_bin_file); 2105 2106 /** 2107 * device_remove_bin_file - remove sysfs binary attribute file 2108 * @dev: device. 2109 * @attr: device binary attribute descriptor. 2110 */ 2111 void device_remove_bin_file(struct device *dev, 2112 const struct bin_attribute *attr) 2113 { 2114 if (dev) 2115 sysfs_remove_bin_file(&dev->kobj, attr); 2116 } 2117 EXPORT_SYMBOL_GPL(device_remove_bin_file); 2118 2119 static void klist_children_get(struct klist_node *n) 2120 { 2121 struct device_private *p = to_device_private_parent(n); 2122 struct device *dev = p->device; 2123 2124 get_device(dev); 2125 } 2126 2127 static void klist_children_put(struct klist_node *n) 2128 { 2129 struct device_private *p = to_device_private_parent(n); 2130 struct device *dev = p->device; 2131 2132 put_device(dev); 2133 } 2134 2135 /** 2136 * device_initialize - init device structure. 2137 * @dev: device. 2138 * 2139 * This prepares the device for use by other layers by initializing 2140 * its fields. 2141 * It is the first half of device_register(), if called by 2142 * that function, though it can also be called separately, so one 2143 * may use @dev's fields. In particular, get_device()/put_device() 2144 * may be used for reference counting of @dev after calling this 2145 * function. 2146 * 2147 * All fields in @dev must be initialized by the caller to 0, except 2148 * for those explicitly set to some other value. The simplest 2149 * approach is to use kzalloc() to allocate the structure containing 2150 * @dev. 2151 * 2152 * NOTE: Use put_device() to give up your reference instead of freeing 2153 * @dev directly once you have called this function. 2154 */ 2155 void device_initialize(struct device *dev) 2156 { 2157 dev->kobj.kset = devices_kset; 2158 kobject_init(&dev->kobj, &device_ktype); 2159 INIT_LIST_HEAD(&dev->dma_pools); 2160 mutex_init(&dev->mutex); 2161 #ifdef CONFIG_PROVE_LOCKING 2162 mutex_init(&dev->lockdep_mutex); 2163 #endif 2164 lockdep_set_novalidate_class(&dev->mutex); 2165 spin_lock_init(&dev->devres_lock); 2166 INIT_LIST_HEAD(&dev->devres_head); 2167 device_pm_init(dev); 2168 set_dev_node(dev, -1); 2169 #ifdef CONFIG_GENERIC_MSI_IRQ 2170 INIT_LIST_HEAD(&dev->msi_list); 2171 #endif 2172 INIT_LIST_HEAD(&dev->links.consumers); 2173 INIT_LIST_HEAD(&dev->links.suppliers); 2174 INIT_LIST_HEAD(&dev->links.needs_suppliers); 2175 INIT_LIST_HEAD(&dev->links.defer_sync); 2176 dev->links.status = DL_DEV_NO_DRIVER; 2177 } 2178 EXPORT_SYMBOL_GPL(device_initialize); 2179 2180 struct kobject *virtual_device_parent(struct device *dev) 2181 { 2182 static struct kobject *virtual_dir = NULL; 2183 2184 if (!virtual_dir) 2185 virtual_dir = kobject_create_and_add("virtual", 2186 &devices_kset->kobj); 2187 2188 return virtual_dir; 2189 } 2190 2191 struct class_dir { 2192 struct kobject kobj; 2193 struct class *class; 2194 }; 2195 2196 #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) 2197 2198 static void class_dir_release(struct kobject *kobj) 2199 { 2200 struct class_dir *dir = to_class_dir(kobj); 2201 kfree(dir); 2202 } 2203 2204 static const 2205 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) 2206 { 2207 struct class_dir *dir = to_class_dir(kobj); 2208 return dir->class->ns_type; 2209 } 2210 2211 static struct kobj_type class_dir_ktype = { 2212 .release = class_dir_release, 2213 .sysfs_ops = &kobj_sysfs_ops, 2214 .child_ns_type = class_dir_child_ns_type 2215 }; 2216 2217 static struct kobject * 2218 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) 2219 { 2220 struct class_dir *dir; 2221 int retval; 2222 2223 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 2224 if (!dir) 2225 return ERR_PTR(-ENOMEM); 2226 2227 dir->class = class; 2228 kobject_init(&dir->kobj, &class_dir_ktype); 2229 2230 dir->kobj.kset = &class->p->glue_dirs; 2231 2232 retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); 2233 if (retval < 0) { 2234 kobject_put(&dir->kobj); 2235 return ERR_PTR(retval); 2236 } 2237 return &dir->kobj; 2238 } 2239 2240 static DEFINE_MUTEX(gdp_mutex); 2241 2242 static struct kobject *get_device_parent(struct device *dev, 2243 struct device *parent) 2244 { 2245 if (dev->class) { 2246 struct kobject *kobj = NULL; 2247 struct kobject *parent_kobj; 2248 struct kobject *k; 2249 2250 #ifdef CONFIG_BLOCK 2251 /* block disks show up in /sys/block */ 2252 if (sysfs_deprecated && dev->class == &block_class) { 2253 if (parent && parent->class == &block_class) 2254 return &parent->kobj; 2255 return &block_class.p->subsys.kobj; 2256 } 2257 #endif 2258 2259 /* 2260 * If we have no parent, we live in "virtual". 2261 * Class-devices with a non class-device as parent, live 2262 * in a "glue" directory to prevent namespace collisions. 2263 */ 2264 if (parent == NULL) 2265 parent_kobj = virtual_device_parent(dev); 2266 else if (parent->class && !dev->class->ns_type) 2267 return &parent->kobj; 2268 else 2269 parent_kobj = &parent->kobj; 2270 2271 mutex_lock(&gdp_mutex); 2272 2273 /* find our class-directory at the parent and reference it */ 2274 spin_lock(&dev->class->p->glue_dirs.list_lock); 2275 list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) 2276 if (k->parent == parent_kobj) { 2277 kobj = kobject_get(k); 2278 break; 2279 } 2280 spin_unlock(&dev->class->p->glue_dirs.list_lock); 2281 if (kobj) { 2282 mutex_unlock(&gdp_mutex); 2283 return kobj; 2284 } 2285 2286 /* or create a new class-directory at the parent device */ 2287 k = class_dir_create_and_add(dev->class, parent_kobj); 2288 /* do not emit an uevent for this simple "glue" directory */ 2289 mutex_unlock(&gdp_mutex); 2290 return k; 2291 } 2292 2293 /* subsystems can specify a default root directory for their devices */ 2294 if (!parent && dev->bus && dev->bus->dev_root) 2295 return &dev->bus->dev_root->kobj; 2296 2297 if (parent) 2298 return &parent->kobj; 2299 return NULL; 2300 } 2301 2302 static inline bool live_in_glue_dir(struct kobject *kobj, 2303 struct device *dev) 2304 { 2305 if (!kobj || !dev->class || 2306 kobj->kset != &dev->class->p->glue_dirs) 2307 return false; 2308 return true; 2309 } 2310 2311 static inline struct kobject *get_glue_dir(struct device *dev) 2312 { 2313 return dev->kobj.parent; 2314 } 2315 2316 /* 2317 * make sure cleaning up dir as the last step, we need to make 2318 * sure .release handler of kobject is run with holding the 2319 * global lock 2320 */ 2321 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) 2322 { 2323 unsigned int ref; 2324 2325 /* see if we live in a "glue" directory */ 2326 if (!live_in_glue_dir(glue_dir, dev)) 2327 return; 2328 2329 mutex_lock(&gdp_mutex); 2330 /** 2331 * There is a race condition between removing glue directory 2332 * and adding a new device under the glue directory. 2333 * 2334 * CPU1: CPU2: 2335 * 2336 * device_add() 2337 * get_device_parent() 2338 * class_dir_create_and_add() 2339 * kobject_add_internal() 2340 * create_dir() // create glue_dir 2341 * 2342 * device_add() 2343 * get_device_parent() 2344 * kobject_get() // get glue_dir 2345 * 2346 * device_del() 2347 * cleanup_glue_dir() 2348 * kobject_del(glue_dir) 2349 * 2350 * kobject_add() 2351 * kobject_add_internal() 2352 * create_dir() // in glue_dir 2353 * sysfs_create_dir_ns() 2354 * kernfs_create_dir_ns(sd) 2355 * 2356 * sysfs_remove_dir() // glue_dir->sd=NULL 2357 * sysfs_put() // free glue_dir->sd 2358 * 2359 * // sd is freed 2360 * kernfs_new_node(sd) 2361 * kernfs_get(glue_dir) 2362 * kernfs_add_one() 2363 * kernfs_put() 2364 * 2365 * Before CPU1 remove last child device under glue dir, if CPU2 add 2366 * a new device under glue dir, the glue_dir kobject reference count 2367 * will be increase to 2 in kobject_get(k). And CPU2 has been called 2368 * kernfs_create_dir_ns(). Meanwhile, CPU1 call sysfs_remove_dir() 2369 * and sysfs_put(). This result in glue_dir->sd is freed. 2370 * 2371 * Then the CPU2 will see a stale "empty" but still potentially used 2372 * glue dir around in kernfs_new_node(). 2373 * 2374 * In order to avoid this happening, we also should make sure that 2375 * kernfs_node for glue_dir is released in CPU1 only when refcount 2376 * for glue_dir kobj is 1. 2377 */ 2378 ref = kref_read(&glue_dir->kref); 2379 if (!kobject_has_children(glue_dir) && !--ref) 2380 kobject_del(glue_dir); 2381 kobject_put(glue_dir); 2382 mutex_unlock(&gdp_mutex); 2383 } 2384 2385 static int device_add_class_symlinks(struct device *dev) 2386 { 2387 struct device_node *of_node = dev_of_node(dev); 2388 int error; 2389 2390 if (of_node) { 2391 error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node"); 2392 if (error) 2393 dev_warn(dev, "Error %d creating of_node link\n",error); 2394 /* An error here doesn't warrant bringing down the device */ 2395 } 2396 2397 if (!dev->class) 2398 return 0; 2399 2400 error = sysfs_create_link(&dev->kobj, 2401 &dev->class->p->subsys.kobj, 2402 "subsystem"); 2403 if (error) 2404 goto out_devnode; 2405 2406 if (dev->parent && device_is_not_partition(dev)) { 2407 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj, 2408 "device"); 2409 if (error) 2410 goto out_subsys; 2411 } 2412 2413 #ifdef CONFIG_BLOCK 2414 /* /sys/block has directories and does not need symlinks */ 2415 if (sysfs_deprecated && dev->class == &block_class) 2416 return 0; 2417 #endif 2418 2419 /* link in the class directory pointing to the device */ 2420 error = sysfs_create_link(&dev->class->p->subsys.kobj, 2421 &dev->kobj, dev_name(dev)); 2422 if (error) 2423 goto out_device; 2424 2425 return 0; 2426 2427 out_device: 2428 sysfs_remove_link(&dev->kobj, "device"); 2429 2430 out_subsys: 2431 sysfs_remove_link(&dev->kobj, "subsystem"); 2432 out_devnode: 2433 sysfs_remove_link(&dev->kobj, "of_node"); 2434 return error; 2435 } 2436 2437 static void device_remove_class_symlinks(struct device *dev) 2438 { 2439 if (dev_of_node(dev)) 2440 sysfs_remove_link(&dev->kobj, "of_node"); 2441 2442 if (!dev->class) 2443 return; 2444 2445 if (dev->parent && device_is_not_partition(dev)) 2446 sysfs_remove_link(&dev->kobj, "device"); 2447 sysfs_remove_link(&dev->kobj, "subsystem"); 2448 #ifdef CONFIG_BLOCK 2449 if (sysfs_deprecated && dev->class == &block_class) 2450 return; 2451 #endif 2452 sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); 2453 } 2454 2455 /** 2456 * dev_set_name - set a device name 2457 * @dev: device 2458 * @fmt: format string for the device's name 2459 */ 2460 int dev_set_name(struct device *dev, const char *fmt, ...) 2461 { 2462 va_list vargs; 2463 int err; 2464 2465 va_start(vargs, fmt); 2466 err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); 2467 va_end(vargs); 2468 return err; 2469 } 2470 EXPORT_SYMBOL_GPL(dev_set_name); 2471 2472 /** 2473 * device_to_dev_kobj - select a /sys/dev/ directory for the device 2474 * @dev: device 2475 * 2476 * By default we select char/ for new entries. Setting class->dev_obj 2477 * to NULL prevents an entry from being created. class->dev_kobj must 2478 * be set (or cleared) before any devices are registered to the class 2479 * otherwise device_create_sys_dev_entry() and 2480 * device_remove_sys_dev_entry() will disagree about the presence of 2481 * the link. 2482 */ 2483 static struct kobject *device_to_dev_kobj(struct device *dev) 2484 { 2485 struct kobject *kobj; 2486 2487 if (dev->class) 2488 kobj = dev->class->dev_kobj; 2489 else 2490 kobj = sysfs_dev_char_kobj; 2491 2492 return kobj; 2493 } 2494 2495 static int device_create_sys_dev_entry(struct device *dev) 2496 { 2497 struct kobject *kobj = device_to_dev_kobj(dev); 2498 int error = 0; 2499 char devt_str[15]; 2500 2501 if (kobj) { 2502 format_dev_t(devt_str, dev->devt); 2503 error = sysfs_create_link(kobj, &dev->kobj, devt_str); 2504 } 2505 2506 return error; 2507 } 2508 2509 static void device_remove_sys_dev_entry(struct device *dev) 2510 { 2511 struct kobject *kobj = device_to_dev_kobj(dev); 2512 char devt_str[15]; 2513 2514 if (kobj) { 2515 format_dev_t(devt_str, dev->devt); 2516 sysfs_remove_link(kobj, devt_str); 2517 } 2518 } 2519 2520 static int device_private_init(struct device *dev) 2521 { 2522 dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); 2523 if (!dev->p) 2524 return -ENOMEM; 2525 dev->p->device = dev; 2526 klist_init(&dev->p->klist_children, klist_children_get, 2527 klist_children_put); 2528 INIT_LIST_HEAD(&dev->p->deferred_probe); 2529 return 0; 2530 } 2531 2532 /** 2533 * device_add - add device to device hierarchy. 2534 * @dev: device. 2535 * 2536 * This is part 2 of device_register(), though may be called 2537 * separately _iff_ device_initialize() has been called separately. 2538 * 2539 * This adds @dev to the kobject hierarchy via kobject_add(), adds it 2540 * to the global and sibling lists for the device, then 2541 * adds it to the other relevant subsystems of the driver model. 2542 * 2543 * Do not call this routine or device_register() more than once for 2544 * any device structure. The driver model core is not designed to work 2545 * with devices that get unregistered and then spring back to life. 2546 * (Among other things, it's very hard to guarantee that all references 2547 * to the previous incarnation of @dev have been dropped.) Allocate 2548 * and register a fresh new struct device instead. 2549 * 2550 * NOTE: _Never_ directly free @dev after calling this function, even 2551 * if it returned an error! Always use put_device() to give up your 2552 * reference instead. 2553 * 2554 * Rule of thumb is: if device_add() succeeds, you should call 2555 * device_del() when you want to get rid of it. If device_add() has 2556 * *not* succeeded, use *only* put_device() to drop the reference 2557 * count. 2558 */ 2559 int device_add(struct device *dev) 2560 { 2561 struct device *parent; 2562 struct kobject *kobj; 2563 struct class_interface *class_intf; 2564 int error = -EINVAL; 2565 struct kobject *glue_dir = NULL; 2566 2567 dev = get_device(dev); 2568 if (!dev) 2569 goto done; 2570 2571 if (!dev->p) { 2572 error = device_private_init(dev); 2573 if (error) 2574 goto done; 2575 } 2576 2577 /* 2578 * for statically allocated devices, which should all be converted 2579 * some day, we need to initialize the name. We prevent reading back 2580 * the name, and force the use of dev_name() 2581 */ 2582 if (dev->init_name) { 2583 dev_set_name(dev, "%s", dev->init_name); 2584 dev->init_name = NULL; 2585 } 2586 2587 /* subsystems can specify simple device enumeration */ 2588 if (!dev_name(dev) && dev->bus && dev->bus->dev_name) 2589 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); 2590 2591 if (!dev_name(dev)) { 2592 error = -EINVAL; 2593 goto name_error; 2594 } 2595 2596 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2597 2598 parent = get_device(dev->parent); 2599 kobj = get_device_parent(dev, parent); 2600 if (IS_ERR(kobj)) { 2601 error = PTR_ERR(kobj); 2602 goto parent_error; 2603 } 2604 if (kobj) 2605 dev->kobj.parent = kobj; 2606 2607 /* use parent numa_node */ 2608 if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) 2609 set_dev_node(dev, dev_to_node(parent)); 2610 2611 /* first, register with generic layer. */ 2612 /* we require the name to be set before, and pass NULL */ 2613 error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); 2614 if (error) { 2615 glue_dir = get_glue_dir(dev); 2616 goto Error; 2617 } 2618 2619 /* notify platform of device entry */ 2620 error = device_platform_notify(dev, KOBJ_ADD); 2621 if (error) 2622 goto platform_error; 2623 2624 error = device_create_file(dev, &dev_attr_uevent); 2625 if (error) 2626 goto attrError; 2627 2628 error = device_add_class_symlinks(dev); 2629 if (error) 2630 goto SymlinkError; 2631 error = device_add_attrs(dev); 2632 if (error) 2633 goto AttrsError; 2634 error = bus_add_device(dev); 2635 if (error) 2636 goto BusError; 2637 error = dpm_sysfs_add(dev); 2638 if (error) 2639 goto DPMError; 2640 device_pm_add(dev); 2641 2642 if (MAJOR(dev->devt)) { 2643 error = device_create_file(dev, &dev_attr_dev); 2644 if (error) 2645 goto DevAttrError; 2646 2647 error = device_create_sys_dev_entry(dev); 2648 if (error) 2649 goto SysEntryError; 2650 2651 devtmpfs_create_node(dev); 2652 } 2653 2654 /* Notify clients of device addition. This call must come 2655 * after dpm_sysfs_add() and before kobject_uevent(). 2656 */ 2657 if (dev->bus) 2658 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2659 BUS_NOTIFY_ADD_DEVICE, dev); 2660 2661 kobject_uevent(&dev->kobj, KOBJ_ADD); 2662 2663 /* 2664 * Check if any of the other devices (consumers) have been waiting for 2665 * this device (supplier) to be added so that they can create a device 2666 * link to it. 2667 * 2668 * This needs to happen after device_pm_add() because device_link_add() 2669 * requires the supplier be registered before it's called. 2670 * 2671 * But this also needs to happen before bus_probe_device() to make sure 2672 * waiting consumers can link to it before the driver is bound to the 2673 * device and the driver sync_state callback is called for this device. 2674 */ 2675 if (dev->fwnode && !dev->fwnode->dev) { 2676 dev->fwnode->dev = dev; 2677 fw_devlink_link_device(dev); 2678 } 2679 2680 bus_probe_device(dev); 2681 if (parent) 2682 klist_add_tail(&dev->p->knode_parent, 2683 &parent->p->klist_children); 2684 2685 if (dev->class) { 2686 mutex_lock(&dev->class->p->mutex); 2687 /* tie the class to the device */ 2688 klist_add_tail(&dev->p->knode_class, 2689 &dev->class->p->klist_devices); 2690 2691 /* notify any interfaces that the device is here */ 2692 list_for_each_entry(class_intf, 2693 &dev->class->p->interfaces, node) 2694 if (class_intf->add_dev) 2695 class_intf->add_dev(dev, class_intf); 2696 mutex_unlock(&dev->class->p->mutex); 2697 } 2698 done: 2699 put_device(dev); 2700 return error; 2701 SysEntryError: 2702 if (MAJOR(dev->devt)) 2703 device_remove_file(dev, &dev_attr_dev); 2704 DevAttrError: 2705 device_pm_remove(dev); 2706 dpm_sysfs_remove(dev); 2707 DPMError: 2708 bus_remove_device(dev); 2709 BusError: 2710 device_remove_attrs(dev); 2711 AttrsError: 2712 device_remove_class_symlinks(dev); 2713 SymlinkError: 2714 device_remove_file(dev, &dev_attr_uevent); 2715 attrError: 2716 device_platform_notify(dev, KOBJ_REMOVE); 2717 platform_error: 2718 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 2719 glue_dir = get_glue_dir(dev); 2720 kobject_del(&dev->kobj); 2721 Error: 2722 cleanup_glue_dir(dev, glue_dir); 2723 parent_error: 2724 put_device(parent); 2725 name_error: 2726 kfree(dev->p); 2727 dev->p = NULL; 2728 goto done; 2729 } 2730 EXPORT_SYMBOL_GPL(device_add); 2731 2732 /** 2733 * device_register - register a device with the system. 2734 * @dev: pointer to the device structure 2735 * 2736 * This happens in two clean steps - initialize the device 2737 * and add it to the system. The two steps can be called 2738 * separately, but this is the easiest and most common. 2739 * I.e. you should only call the two helpers separately if 2740 * have a clearly defined need to use and refcount the device 2741 * before it is added to the hierarchy. 2742 * 2743 * For more information, see the kerneldoc for device_initialize() 2744 * and device_add(). 2745 * 2746 * NOTE: _Never_ directly free @dev after calling this function, even 2747 * if it returned an error! Always use put_device() to give up the 2748 * reference initialized in this function instead. 2749 */ 2750 int device_register(struct device *dev) 2751 { 2752 device_initialize(dev); 2753 return device_add(dev); 2754 } 2755 EXPORT_SYMBOL_GPL(device_register); 2756 2757 /** 2758 * get_device - increment reference count for device. 2759 * @dev: device. 2760 * 2761 * This simply forwards the call to kobject_get(), though 2762 * we do take care to provide for the case that we get a NULL 2763 * pointer passed in. 2764 */ 2765 struct device *get_device(struct device *dev) 2766 { 2767 return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; 2768 } 2769 EXPORT_SYMBOL_GPL(get_device); 2770 2771 /** 2772 * put_device - decrement reference count. 2773 * @dev: device in question. 2774 */ 2775 void put_device(struct device *dev) 2776 { 2777 /* might_sleep(); */ 2778 if (dev) 2779 kobject_put(&dev->kobj); 2780 } 2781 EXPORT_SYMBOL_GPL(put_device); 2782 2783 bool kill_device(struct device *dev) 2784 { 2785 /* 2786 * Require the device lock and set the "dead" flag to guarantee that 2787 * the update behavior is consistent with the other bitfields near 2788 * it and that we cannot have an asynchronous probe routine trying 2789 * to run while we are tearing out the bus/class/sysfs from 2790 * underneath the device. 2791 */ 2792 lockdep_assert_held(&dev->mutex); 2793 2794 if (dev->p->dead) 2795 return false; 2796 dev->p->dead = true; 2797 return true; 2798 } 2799 EXPORT_SYMBOL_GPL(kill_device); 2800 2801 /** 2802 * device_del - delete device from system. 2803 * @dev: device. 2804 * 2805 * This is the first part of the device unregistration 2806 * sequence. This removes the device from the lists we control 2807 * from here, has it removed from the other driver model 2808 * subsystems it was added to in device_add(), and removes it 2809 * from the kobject hierarchy. 2810 * 2811 * NOTE: this should be called manually _iff_ device_add() was 2812 * also called manually. 2813 */ 2814 void device_del(struct device *dev) 2815 { 2816 struct device *parent = dev->parent; 2817 struct kobject *glue_dir = NULL; 2818 struct class_interface *class_intf; 2819 2820 device_lock(dev); 2821 kill_device(dev); 2822 device_unlock(dev); 2823 2824 if (dev->fwnode && dev->fwnode->dev == dev) 2825 dev->fwnode->dev = NULL; 2826 2827 /* Notify clients of device removal. This call must come 2828 * before dpm_sysfs_remove(). 2829 */ 2830 if (dev->bus) 2831 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2832 BUS_NOTIFY_DEL_DEVICE, dev); 2833 2834 dpm_sysfs_remove(dev); 2835 if (parent) 2836 klist_del(&dev->p->knode_parent); 2837 if (MAJOR(dev->devt)) { 2838 devtmpfs_delete_node(dev); 2839 device_remove_sys_dev_entry(dev); 2840 device_remove_file(dev, &dev_attr_dev); 2841 } 2842 if (dev->class) { 2843 device_remove_class_symlinks(dev); 2844 2845 mutex_lock(&dev->class->p->mutex); 2846 /* notify any interfaces that the device is now gone */ 2847 list_for_each_entry(class_intf, 2848 &dev->class->p->interfaces, node) 2849 if (class_intf->remove_dev) 2850 class_intf->remove_dev(dev, class_intf); 2851 /* remove the device from the class list */ 2852 klist_del(&dev->p->knode_class); 2853 mutex_unlock(&dev->class->p->mutex); 2854 } 2855 device_remove_file(dev, &dev_attr_uevent); 2856 device_remove_attrs(dev); 2857 bus_remove_device(dev); 2858 device_pm_remove(dev); 2859 driver_deferred_probe_del(dev); 2860 device_platform_notify(dev, KOBJ_REMOVE); 2861 device_remove_properties(dev); 2862 device_links_purge(dev); 2863 2864 if (dev->bus) 2865 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 2866 BUS_NOTIFY_REMOVED_DEVICE, dev); 2867 kobject_uevent(&dev->kobj, KOBJ_REMOVE); 2868 glue_dir = get_glue_dir(dev); 2869 kobject_del(&dev->kobj); 2870 cleanup_glue_dir(dev, glue_dir); 2871 put_device(parent); 2872 } 2873 EXPORT_SYMBOL_GPL(device_del); 2874 2875 /** 2876 * device_unregister - unregister device from system. 2877 * @dev: device going away. 2878 * 2879 * We do this in two parts, like we do device_register(). First, 2880 * we remove it from all the subsystems with device_del(), then 2881 * we decrement the reference count via put_device(). If that 2882 * is the final reference count, the device will be cleaned up 2883 * via device_release() above. Otherwise, the structure will 2884 * stick around until the final reference to the device is dropped. 2885 */ 2886 void device_unregister(struct device *dev) 2887 { 2888 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 2889 device_del(dev); 2890 put_device(dev); 2891 } 2892 EXPORT_SYMBOL_GPL(device_unregister); 2893 2894 static struct device *prev_device(struct klist_iter *i) 2895 { 2896 struct klist_node *n = klist_prev(i); 2897 struct device *dev = NULL; 2898 struct device_private *p; 2899 2900 if (n) { 2901 p = to_device_private_parent(n); 2902 dev = p->device; 2903 } 2904 return dev; 2905 } 2906 2907 static struct device *next_device(struct klist_iter *i) 2908 { 2909 struct klist_node *n = klist_next(i); 2910 struct device *dev = NULL; 2911 struct device_private *p; 2912 2913 if (n) { 2914 p = to_device_private_parent(n); 2915 dev = p->device; 2916 } 2917 return dev; 2918 } 2919 2920 /** 2921 * device_get_devnode - path of device node file 2922 * @dev: device 2923 * @mode: returned file access mode 2924 * @uid: returned file owner 2925 * @gid: returned file group 2926 * @tmp: possibly allocated string 2927 * 2928 * Return the relative path of a possible device node. 2929 * Non-default names may need to allocate a memory to compose 2930 * a name. This memory is returned in tmp and needs to be 2931 * freed by the caller. 2932 */ 2933 const char *device_get_devnode(struct device *dev, 2934 umode_t *mode, kuid_t *uid, kgid_t *gid, 2935 const char **tmp) 2936 { 2937 char *s; 2938 2939 *tmp = NULL; 2940 2941 /* the device type may provide a specific name */ 2942 if (dev->type && dev->type->devnode) 2943 *tmp = dev->type->devnode(dev, mode, uid, gid); 2944 if (*tmp) 2945 return *tmp; 2946 2947 /* the class may provide a specific name */ 2948 if (dev->class && dev->class->devnode) 2949 *tmp = dev->class->devnode(dev, mode); 2950 if (*tmp) 2951 return *tmp; 2952 2953 /* return name without allocation, tmp == NULL */ 2954 if (strchr(dev_name(dev), '!') == NULL) 2955 return dev_name(dev); 2956 2957 /* replace '!' in the name with '/' */ 2958 s = kstrdup(dev_name(dev), GFP_KERNEL); 2959 if (!s) 2960 return NULL; 2961 strreplace(s, '!', '/'); 2962 return *tmp = s; 2963 } 2964 2965 /** 2966 * device_for_each_child - device child iterator. 2967 * @parent: parent struct device. 2968 * @fn: function to be called for each device. 2969 * @data: data for the callback. 2970 * 2971 * Iterate over @parent's child devices, and call @fn for each, 2972 * passing it @data. 2973 * 2974 * We check the return of @fn each time. If it returns anything 2975 * other than 0, we break out and return that value. 2976 */ 2977 int device_for_each_child(struct device *parent, void *data, 2978 int (*fn)(struct device *dev, void *data)) 2979 { 2980 struct klist_iter i; 2981 struct device *child; 2982 int error = 0; 2983 2984 if (!parent->p) 2985 return 0; 2986 2987 klist_iter_init(&parent->p->klist_children, &i); 2988 while (!error && (child = next_device(&i))) 2989 error = fn(child, data); 2990 klist_iter_exit(&i); 2991 return error; 2992 } 2993 EXPORT_SYMBOL_GPL(device_for_each_child); 2994 2995 /** 2996 * device_for_each_child_reverse - device child iterator in reversed order. 2997 * @parent: parent struct device. 2998 * @fn: function to be called for each device. 2999 * @data: data for the callback. 3000 * 3001 * Iterate over @parent's child devices, and call @fn for each, 3002 * passing it @data. 3003 * 3004 * We check the return of @fn each time. If it returns anything 3005 * other than 0, we break out and return that value. 3006 */ 3007 int device_for_each_child_reverse(struct device *parent, void *data, 3008 int (*fn)(struct device *dev, void *data)) 3009 { 3010 struct klist_iter i; 3011 struct device *child; 3012 int error = 0; 3013 3014 if (!parent->p) 3015 return 0; 3016 3017 klist_iter_init(&parent->p->klist_children, &i); 3018 while ((child = prev_device(&i)) && !error) 3019 error = fn(child, data); 3020 klist_iter_exit(&i); 3021 return error; 3022 } 3023 EXPORT_SYMBOL_GPL(device_for_each_child_reverse); 3024 3025 /** 3026 * device_find_child - device iterator for locating a particular device. 3027 * @parent: parent struct device 3028 * @match: Callback function to check device 3029 * @data: Data to pass to match function 3030 * 3031 * This is similar to the device_for_each_child() function above, but it 3032 * returns a reference to a device that is 'found' for later use, as 3033 * determined by the @match callback. 3034 * 3035 * The callback should return 0 if the device doesn't match and non-zero 3036 * if it does. If the callback returns non-zero and a reference to the 3037 * current device can be obtained, this function will return to the caller 3038 * and not iterate over any more devices. 3039 * 3040 * NOTE: you will need to drop the reference with put_device() after use. 3041 */ 3042 struct device *device_find_child(struct device *parent, void *data, 3043 int (*match)(struct device *dev, void *data)) 3044 { 3045 struct klist_iter i; 3046 struct device *child; 3047 3048 if (!parent) 3049 return NULL; 3050 3051 klist_iter_init(&parent->p->klist_children, &i); 3052 while ((child = next_device(&i))) 3053 if (match(child, data) && get_device(child)) 3054 break; 3055 klist_iter_exit(&i); 3056 return child; 3057 } 3058 EXPORT_SYMBOL_GPL(device_find_child); 3059 3060 /** 3061 * device_find_child_by_name - device iterator for locating a child device. 3062 * @parent: parent struct device 3063 * @name: name of the child device 3064 * 3065 * This is similar to the device_find_child() function above, but it 3066 * returns a reference to a device that has the name @name. 3067 * 3068 * NOTE: you will need to drop the reference with put_device() after use. 3069 */ 3070 struct device *device_find_child_by_name(struct device *parent, 3071 const char *name) 3072 { 3073 struct klist_iter i; 3074 struct device *child; 3075 3076 if (!parent) 3077 return NULL; 3078 3079 klist_iter_init(&parent->p->klist_children, &i); 3080 while ((child = next_device(&i))) 3081 if (!strcmp(dev_name(child), name) && get_device(child)) 3082 break; 3083 klist_iter_exit(&i); 3084 return child; 3085 } 3086 EXPORT_SYMBOL_GPL(device_find_child_by_name); 3087 3088 int __init devices_init(void) 3089 { 3090 devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); 3091 if (!devices_kset) 3092 return -ENOMEM; 3093 dev_kobj = kobject_create_and_add("dev", NULL); 3094 if (!dev_kobj) 3095 goto dev_kobj_err; 3096 sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); 3097 if (!sysfs_dev_block_kobj) 3098 goto block_kobj_err; 3099 sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); 3100 if (!sysfs_dev_char_kobj) 3101 goto char_kobj_err; 3102 3103 return 0; 3104 3105 char_kobj_err: 3106 kobject_put(sysfs_dev_block_kobj); 3107 block_kobj_err: 3108 kobject_put(dev_kobj); 3109 dev_kobj_err: 3110 kset_unregister(devices_kset); 3111 return -ENOMEM; 3112 } 3113 3114 static int device_check_offline(struct device *dev, void *not_used) 3115 { 3116 int ret; 3117 3118 ret = device_for_each_child(dev, NULL, device_check_offline); 3119 if (ret) 3120 return ret; 3121 3122 return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; 3123 } 3124 3125 /** 3126 * device_offline - Prepare the device for hot-removal. 3127 * @dev: Device to be put offline. 3128 * 3129 * Execute the device bus type's .offline() callback, if present, to prepare 3130 * the device for a subsequent hot-removal. If that succeeds, the device must 3131 * not be used until either it is removed or its bus type's .online() callback 3132 * is executed. 3133 * 3134 * Call under device_hotplug_lock. 3135 */ 3136 int device_offline(struct device *dev) 3137 { 3138 int ret; 3139 3140 if (dev->offline_disabled) 3141 return -EPERM; 3142 3143 ret = device_for_each_child(dev, NULL, device_check_offline); 3144 if (ret) 3145 return ret; 3146 3147 device_lock(dev); 3148 if (device_supports_offline(dev)) { 3149 if (dev->offline) { 3150 ret = 1; 3151 } else { 3152 ret = dev->bus->offline(dev); 3153 if (!ret) { 3154 kobject_uevent(&dev->kobj, KOBJ_OFFLINE); 3155 dev->offline = true; 3156 } 3157 } 3158 } 3159 device_unlock(dev); 3160 3161 return ret; 3162 } 3163 3164 /** 3165 * device_online - Put the device back online after successful device_offline(). 3166 * @dev: Device to be put back online. 3167 * 3168 * If device_offline() has been successfully executed for @dev, but the device 3169 * has not been removed subsequently, execute its bus type's .online() callback 3170 * to indicate that the device can be used again. 3171 * 3172 * Call under device_hotplug_lock. 3173 */ 3174 int device_online(struct device *dev) 3175 { 3176 int ret = 0; 3177 3178 device_lock(dev); 3179 if (device_supports_offline(dev)) { 3180 if (dev->offline) { 3181 ret = dev->bus->online(dev); 3182 if (!ret) { 3183 kobject_uevent(&dev->kobj, KOBJ_ONLINE); 3184 dev->offline = false; 3185 } 3186 } else { 3187 ret = 1; 3188 } 3189 } 3190 device_unlock(dev); 3191 3192 return ret; 3193 } 3194 3195 struct root_device { 3196 struct device dev; 3197 struct module *owner; 3198 }; 3199 3200 static inline struct root_device *to_root_device(struct device *d) 3201 { 3202 return container_of(d, struct root_device, dev); 3203 } 3204 3205 static void root_device_release(struct device *dev) 3206 { 3207 kfree(to_root_device(dev)); 3208 } 3209 3210 /** 3211 * __root_device_register - allocate and register a root device 3212 * @name: root device name 3213 * @owner: owner module of the root device, usually THIS_MODULE 3214 * 3215 * This function allocates a root device and registers it 3216 * using device_register(). In order to free the returned 3217 * device, use root_device_unregister(). 3218 * 3219 * Root devices are dummy devices which allow other devices 3220 * to be grouped under /sys/devices. Use this function to 3221 * allocate a root device and then use it as the parent of 3222 * any device which should appear under /sys/devices/{name} 3223 * 3224 * The /sys/devices/{name} directory will also contain a 3225 * 'module' symlink which points to the @owner directory 3226 * in sysfs. 3227 * 3228 * Returns &struct device pointer on success, or ERR_PTR() on error. 3229 * 3230 * Note: You probably want to use root_device_register(). 3231 */ 3232 struct device *__root_device_register(const char *name, struct module *owner) 3233 { 3234 struct root_device *root; 3235 int err = -ENOMEM; 3236 3237 root = kzalloc(sizeof(struct root_device), GFP_KERNEL); 3238 if (!root) 3239 return ERR_PTR(err); 3240 3241 err = dev_set_name(&root->dev, "%s", name); 3242 if (err) { 3243 kfree(root); 3244 return ERR_PTR(err); 3245 } 3246 3247 root->dev.release = root_device_release; 3248 3249 err = device_register(&root->dev); 3250 if (err) { 3251 put_device(&root->dev); 3252 return ERR_PTR(err); 3253 } 3254 3255 #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ 3256 if (owner) { 3257 struct module_kobject *mk = &owner->mkobj; 3258 3259 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); 3260 if (err) { 3261 device_unregister(&root->dev); 3262 return ERR_PTR(err); 3263 } 3264 root->owner = owner; 3265 } 3266 #endif 3267 3268 return &root->dev; 3269 } 3270 EXPORT_SYMBOL_GPL(__root_device_register); 3271 3272 /** 3273 * root_device_unregister - unregister and free a root device 3274 * @dev: device going away 3275 * 3276 * This function unregisters and cleans up a device that was created by 3277 * root_device_register(). 3278 */ 3279 void root_device_unregister(struct device *dev) 3280 { 3281 struct root_device *root = to_root_device(dev); 3282 3283 if (root->owner) 3284 sysfs_remove_link(&root->dev.kobj, "module"); 3285 3286 device_unregister(dev); 3287 } 3288 EXPORT_SYMBOL_GPL(root_device_unregister); 3289 3290 3291 static void device_create_release(struct device *dev) 3292 { 3293 pr_debug("device: '%s': %s\n", dev_name(dev), __func__); 3294 kfree(dev); 3295 } 3296 3297 static __printf(6, 0) struct device * 3298 device_create_groups_vargs(struct class *class, struct device *parent, 3299 dev_t devt, void *drvdata, 3300 const struct attribute_group **groups, 3301 const char *fmt, va_list args) 3302 { 3303 struct device *dev = NULL; 3304 int retval = -ENODEV; 3305 3306 if (class == NULL || IS_ERR(class)) 3307 goto error; 3308 3309 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 3310 if (!dev) { 3311 retval = -ENOMEM; 3312 goto error; 3313 } 3314 3315 device_initialize(dev); 3316 dev->devt = devt; 3317 dev->class = class; 3318 dev->parent = parent; 3319 dev->groups = groups; 3320 dev->release = device_create_release; 3321 dev_set_drvdata(dev, drvdata); 3322 3323 retval = kobject_set_name_vargs(&dev->kobj, fmt, args); 3324 if (retval) 3325 goto error; 3326 3327 retval = device_add(dev); 3328 if (retval) 3329 goto error; 3330 3331 return dev; 3332 3333 error: 3334 put_device(dev); 3335 return ERR_PTR(retval); 3336 } 3337 3338 /** 3339 * device_create - creates a device and registers it with sysfs 3340 * @class: pointer to the struct class that this device should be registered to 3341 * @parent: pointer to the parent struct device of this new device, if any 3342 * @devt: the dev_t for the char device to be added 3343 * @drvdata: the data to be added to the device for callbacks 3344 * @fmt: string for the device's name 3345 * 3346 * This function can be used by char device classes. A struct device 3347 * will be created in sysfs, registered to the specified class. 3348 * 3349 * A "dev" file will be created, showing the dev_t for the device, if 3350 * the dev_t is not 0,0. 3351 * If a pointer to a parent struct device is passed in, the newly created 3352 * struct device will be a child of that device in sysfs. 3353 * The pointer to the struct device will be returned from the call. 3354 * Any further sysfs files that might be required can be created using this 3355 * pointer. 3356 * 3357 * Returns &struct device pointer on success, or ERR_PTR() on error. 3358 * 3359 * Note: the struct class passed to this function must have previously 3360 * been created with a call to class_create(). 3361 */ 3362 struct device *device_create(struct class *class, struct device *parent, 3363 dev_t devt, void *drvdata, const char *fmt, ...) 3364 { 3365 va_list vargs; 3366 struct device *dev; 3367 3368 va_start(vargs, fmt); 3369 dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL, 3370 fmt, vargs); 3371 va_end(vargs); 3372 return dev; 3373 } 3374 EXPORT_SYMBOL_GPL(device_create); 3375 3376 /** 3377 * device_create_with_groups - creates a device and registers it with sysfs 3378 * @class: pointer to the struct class that this device should be registered to 3379 * @parent: pointer to the parent struct device of this new device, if any 3380 * @devt: the dev_t for the char device to be added 3381 * @drvdata: the data to be added to the device for callbacks 3382 * @groups: NULL-terminated list of attribute groups to be created 3383 * @fmt: string for the device's name 3384 * 3385 * This function can be used by char device classes. A struct device 3386 * will be created in sysfs, registered to the specified class. 3387 * Additional attributes specified in the groups parameter will also 3388 * be created automatically. 3389 * 3390 * A "dev" file will be created, showing the dev_t for the device, if 3391 * the dev_t is not 0,0. 3392 * If a pointer to a parent struct device is passed in, the newly created 3393 * struct device will be a child of that device in sysfs. 3394 * The pointer to the struct device will be returned from the call. 3395 * Any further sysfs files that might be required can be created using this 3396 * pointer. 3397 * 3398 * Returns &struct device pointer on success, or ERR_PTR() on error. 3399 * 3400 * Note: the struct class passed to this function must have previously 3401 * been created with a call to class_create(). 3402 */ 3403 struct device *device_create_with_groups(struct class *class, 3404 struct device *parent, dev_t devt, 3405 void *drvdata, 3406 const struct attribute_group **groups, 3407 const char *fmt, ...) 3408 { 3409 va_list vargs; 3410 struct device *dev; 3411 3412 va_start(vargs, fmt); 3413 dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, 3414 fmt, vargs); 3415 va_end(vargs); 3416 return dev; 3417 } 3418 EXPORT_SYMBOL_GPL(device_create_with_groups); 3419 3420 /** 3421 * device_destroy - removes a device that was created with device_create() 3422 * @class: pointer to the struct class that this device was registered with 3423 * @devt: the dev_t of the device that was previously registered 3424 * 3425 * This call unregisters and cleans up a device that was created with a 3426 * call to device_create(). 3427 */ 3428 void device_destroy(struct class *class, dev_t devt) 3429 { 3430 struct device *dev; 3431 3432 dev = class_find_device_by_devt(class, devt); 3433 if (dev) { 3434 put_device(dev); 3435 device_unregister(dev); 3436 } 3437 } 3438 EXPORT_SYMBOL_GPL(device_destroy); 3439 3440 /** 3441 * device_rename - renames a device 3442 * @dev: the pointer to the struct device to be renamed 3443 * @new_name: the new name of the device 3444 * 3445 * It is the responsibility of the caller to provide mutual 3446 * exclusion between two different calls of device_rename 3447 * on the same device to ensure that new_name is valid and 3448 * won't conflict with other devices. 3449 * 3450 * Note: Don't call this function. Currently, the networking layer calls this 3451 * function, but that will change. The following text from Kay Sievers offers 3452 * some insight: 3453 * 3454 * Renaming devices is racy at many levels, symlinks and other stuff are not 3455 * replaced atomically, and you get a "move" uevent, but it's not easy to 3456 * connect the event to the old and new device. Device nodes are not renamed at 3457 * all, there isn't even support for that in the kernel now. 3458 * 3459 * In the meantime, during renaming, your target name might be taken by another 3460 * driver, creating conflicts. Or the old name is taken directly after you 3461 * renamed it -- then you get events for the same DEVPATH, before you even see 3462 * the "move" event. It's just a mess, and nothing new should ever rely on 3463 * kernel device renaming. Besides that, it's not even implemented now for 3464 * other things than (driver-core wise very simple) network devices. 3465 * 3466 * We are currently about to change network renaming in udev to completely 3467 * disallow renaming of devices in the same namespace as the kernel uses, 3468 * because we can't solve the problems properly, that arise with swapping names 3469 * of multiple interfaces without races. Means, renaming of eth[0-9]* will only 3470 * be allowed to some other name than eth[0-9]*, for the aforementioned 3471 * reasons. 3472 * 3473 * Make up a "real" name in the driver before you register anything, or add 3474 * some other attributes for userspace to find the device, or use udev to add 3475 * symlinks -- but never rename kernel devices later, it's a complete mess. We 3476 * don't even want to get into that and try to implement the missing pieces in 3477 * the core. We really have other pieces to fix in the driver core mess. :) 3478 */ 3479 int device_rename(struct device *dev, const char *new_name) 3480 { 3481 struct kobject *kobj = &dev->kobj; 3482 char *old_device_name = NULL; 3483 int error; 3484 3485 dev = get_device(dev); 3486 if (!dev) 3487 return -EINVAL; 3488 3489 dev_dbg(dev, "renaming to %s\n", new_name); 3490 3491 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 3492 if (!old_device_name) { 3493 error = -ENOMEM; 3494 goto out; 3495 } 3496 3497 if (dev->class) { 3498 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, 3499 kobj, old_device_name, 3500 new_name, kobject_namespace(kobj)); 3501 if (error) 3502 goto out; 3503 } 3504 3505 error = kobject_rename(kobj, new_name); 3506 if (error) 3507 goto out; 3508 3509 out: 3510 put_device(dev); 3511 3512 kfree(old_device_name); 3513 3514 return error; 3515 } 3516 EXPORT_SYMBOL_GPL(device_rename); 3517 3518 static int device_move_class_links(struct device *dev, 3519 struct device *old_parent, 3520 struct device *new_parent) 3521 { 3522 int error = 0; 3523 3524 if (old_parent) 3525 sysfs_remove_link(&dev->kobj, "device"); 3526 if (new_parent) 3527 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, 3528 "device"); 3529 return error; 3530 } 3531 3532 /** 3533 * device_move - moves a device to a new parent 3534 * @dev: the pointer to the struct device to be moved 3535 * @new_parent: the new parent of the device (can be NULL) 3536 * @dpm_order: how to reorder the dpm_list 3537 */ 3538 int device_move(struct device *dev, struct device *new_parent, 3539 enum dpm_order dpm_order) 3540 { 3541 int error; 3542 struct device *old_parent; 3543 struct kobject *new_parent_kobj; 3544 3545 dev = get_device(dev); 3546 if (!dev) 3547 return -EINVAL; 3548 3549 device_pm_lock(); 3550 new_parent = get_device(new_parent); 3551 new_parent_kobj = get_device_parent(dev, new_parent); 3552 if (IS_ERR(new_parent_kobj)) { 3553 error = PTR_ERR(new_parent_kobj); 3554 put_device(new_parent); 3555 goto out; 3556 } 3557 3558 pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), 3559 __func__, new_parent ? dev_name(new_parent) : "<NULL>"); 3560 error = kobject_move(&dev->kobj, new_parent_kobj); 3561 if (error) { 3562 cleanup_glue_dir(dev, new_parent_kobj); 3563 put_device(new_parent); 3564 goto out; 3565 } 3566 old_parent = dev->parent; 3567 dev->parent = new_parent; 3568 if (old_parent) 3569 klist_remove(&dev->p->knode_parent); 3570 if (new_parent) { 3571 klist_add_tail(&dev->p->knode_parent, 3572 &new_parent->p->klist_children); 3573 set_dev_node(dev, dev_to_node(new_parent)); 3574 } 3575 3576 if (dev->class) { 3577 error = device_move_class_links(dev, old_parent, new_parent); 3578 if (error) { 3579 /* We ignore errors on cleanup since we're hosed anyway... */ 3580 device_move_class_links(dev, new_parent, old_parent); 3581 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 3582 if (new_parent) 3583 klist_remove(&dev->p->knode_parent); 3584 dev->parent = old_parent; 3585 if (old_parent) { 3586 klist_add_tail(&dev->p->knode_parent, 3587 &old_parent->p->klist_children); 3588 set_dev_node(dev, dev_to_node(old_parent)); 3589 } 3590 } 3591 cleanup_glue_dir(dev, new_parent_kobj); 3592 put_device(new_parent); 3593 goto out; 3594 } 3595 } 3596 switch (dpm_order) { 3597 case DPM_ORDER_NONE: 3598 break; 3599 case DPM_ORDER_DEV_AFTER_PARENT: 3600 device_pm_move_after(dev, new_parent); 3601 devices_kset_move_after(dev, new_parent); 3602 break; 3603 case DPM_ORDER_PARENT_BEFORE_DEV: 3604 device_pm_move_before(new_parent, dev); 3605 devices_kset_move_before(new_parent, dev); 3606 break; 3607 case DPM_ORDER_DEV_LAST: 3608 device_pm_move_last(dev); 3609 devices_kset_move_last(dev); 3610 break; 3611 } 3612 3613 put_device(old_parent); 3614 out: 3615 device_pm_unlock(); 3616 put_device(dev); 3617 return error; 3618 } 3619 EXPORT_SYMBOL_GPL(device_move); 3620 3621 static int device_attrs_change_owner(struct device *dev, kuid_t kuid, 3622 kgid_t kgid) 3623 { 3624 struct kobject *kobj = &dev->kobj; 3625 struct class *class = dev->class; 3626 const struct device_type *type = dev->type; 3627 int error; 3628 3629 if (class) { 3630 /* 3631 * Change the device groups of the device class for @dev to 3632 * @kuid/@kgid. 3633 */ 3634 error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid, 3635 kgid); 3636 if (error) 3637 return error; 3638 } 3639 3640 if (type) { 3641 /* 3642 * Change the device groups of the device type for @dev to 3643 * @kuid/@kgid. 3644 */ 3645 error = sysfs_groups_change_owner(kobj, type->groups, kuid, 3646 kgid); 3647 if (error) 3648 return error; 3649 } 3650 3651 /* Change the device groups of @dev to @kuid/@kgid. */ 3652 error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid); 3653 if (error) 3654 return error; 3655 3656 if (device_supports_offline(dev) && !dev->offline_disabled) { 3657 /* Change online device attributes of @dev to @kuid/@kgid. */ 3658 error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name, 3659 kuid, kgid); 3660 if (error) 3661 return error; 3662 } 3663 3664 return 0; 3665 } 3666 3667 /** 3668 * device_change_owner - change the owner of an existing device. 3669 * @dev: device. 3670 * @kuid: new owner's kuid 3671 * @kgid: new owner's kgid 3672 * 3673 * This changes the owner of @dev and its corresponding sysfs entries to 3674 * @kuid/@kgid. This function closely mirrors how @dev was added via driver 3675 * core. 3676 * 3677 * Returns 0 on success or error code on failure. 3678 */ 3679 int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) 3680 { 3681 int error; 3682 struct kobject *kobj = &dev->kobj; 3683 3684 dev = get_device(dev); 3685 if (!dev) 3686 return -EINVAL; 3687 3688 /* 3689 * Change the kobject and the default attributes and groups of the 3690 * ktype associated with it to @kuid/@kgid. 3691 */ 3692 error = sysfs_change_owner(kobj, kuid, kgid); 3693 if (error) 3694 goto out; 3695 3696 /* 3697 * Change the uevent file for @dev to the new owner. The uevent file 3698 * was created in a separate step when @dev got added and we mirror 3699 * that step here. 3700 */ 3701 error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid, 3702 kgid); 3703 if (error) 3704 goto out; 3705 3706 /* 3707 * Change the device groups, the device groups associated with the 3708 * device class, and the groups associated with the device type of @dev 3709 * to @kuid/@kgid. 3710 */ 3711 error = device_attrs_change_owner(dev, kuid, kgid); 3712 if (error) 3713 goto out; 3714 3715 error = dpm_sysfs_change_owner(dev, kuid, kgid); 3716 if (error) 3717 goto out; 3718 3719 #ifdef CONFIG_BLOCK 3720 if (sysfs_deprecated && dev->class == &block_class) 3721 goto out; 3722 #endif 3723 3724 /* 3725 * Change the owner of the symlink located in the class directory of 3726 * the device class associated with @dev which points to the actual 3727 * directory entry for @dev to @kuid/@kgid. This ensures that the 3728 * symlink shows the same permissions as its target. 3729 */ 3730 error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj, 3731 dev_name(dev), kuid, kgid); 3732 if (error) 3733 goto out; 3734 3735 out: 3736 put_device(dev); 3737 return error; 3738 } 3739 EXPORT_SYMBOL_GPL(device_change_owner); 3740 3741 /** 3742 * device_shutdown - call ->shutdown() on each device to shutdown. 3743 */ 3744 void device_shutdown(void) 3745 { 3746 struct device *dev, *parent; 3747 3748 wait_for_device_probe(); 3749 device_block_probing(); 3750 3751 cpufreq_suspend(); 3752 3753 spin_lock(&devices_kset->list_lock); 3754 /* 3755 * Walk the devices list backward, shutting down each in turn. 3756 * Beware that device unplug events may also start pulling 3757 * devices offline, even as the system is shutting down. 3758 */ 3759 while (!list_empty(&devices_kset->list)) { 3760 dev = list_entry(devices_kset->list.prev, struct device, 3761 kobj.entry); 3762 3763 /* 3764 * hold reference count of device's parent to 3765 * prevent it from being freed because parent's 3766 * lock is to be held 3767 */ 3768 parent = get_device(dev->parent); 3769 get_device(dev); 3770 /* 3771 * Make sure the device is off the kset list, in the 3772 * event that dev->*->shutdown() doesn't remove it. 3773 */ 3774 list_del_init(&dev->kobj.entry); 3775 spin_unlock(&devices_kset->list_lock); 3776 3777 /* hold lock to avoid race with probe/release */ 3778 if (parent) 3779 device_lock(parent); 3780 device_lock(dev); 3781 3782 /* Don't allow any more runtime suspends */ 3783 pm_runtime_get_noresume(dev); 3784 pm_runtime_barrier(dev); 3785 3786 if (dev->class && dev->class->shutdown_pre) { 3787 if (initcall_debug) 3788 dev_info(dev, "shutdown_pre\n"); 3789 dev->class->shutdown_pre(dev); 3790 } 3791 if (dev->bus && dev->bus->shutdown) { 3792 if (initcall_debug) 3793 dev_info(dev, "shutdown\n"); 3794 dev->bus->shutdown(dev); 3795 } else if (dev->driver && dev->driver->shutdown) { 3796 if (initcall_debug) 3797 dev_info(dev, "shutdown\n"); 3798 dev->driver->shutdown(dev); 3799 } 3800 3801 device_unlock(dev); 3802 if (parent) 3803 device_unlock(parent); 3804 3805 put_device(dev); 3806 put_device(parent); 3807 3808 spin_lock(&devices_kset->list_lock); 3809 } 3810 spin_unlock(&devices_kset->list_lock); 3811 } 3812 3813 /* 3814 * Device logging functions 3815 */ 3816 3817 #ifdef CONFIG_PRINTK 3818 static int 3819 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen) 3820 { 3821 const char *subsys; 3822 size_t pos = 0; 3823 3824 if (dev->class) 3825 subsys = dev->class->name; 3826 else if (dev->bus) 3827 subsys = dev->bus->name; 3828 else 3829 return 0; 3830 3831 pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys); 3832 if (pos >= hdrlen) 3833 goto overflow; 3834 3835 /* 3836 * Add device identifier DEVICE=: 3837 * b12:8 block dev_t 3838 * c127:3 char dev_t 3839 * n8 netdev ifindex 3840 * +sound:card0 subsystem:devname 3841 */ 3842 if (MAJOR(dev->devt)) { 3843 char c; 3844 3845 if (strcmp(subsys, "block") == 0) 3846 c = 'b'; 3847 else 3848 c = 'c'; 3849 pos++; 3850 pos += snprintf(hdr + pos, hdrlen - pos, 3851 "DEVICE=%c%u:%u", 3852 c, MAJOR(dev->devt), MINOR(dev->devt)); 3853 } else if (strcmp(subsys, "net") == 0) { 3854 struct net_device *net = to_net_dev(dev); 3855 3856 pos++; 3857 pos += snprintf(hdr + pos, hdrlen - pos, 3858 "DEVICE=n%u", net->ifindex); 3859 } else { 3860 pos++; 3861 pos += snprintf(hdr + pos, hdrlen - pos, 3862 "DEVICE=+%s:%s", subsys, dev_name(dev)); 3863 } 3864 3865 if (pos >= hdrlen) 3866 goto overflow; 3867 3868 return pos; 3869 3870 overflow: 3871 dev_WARN(dev, "device/subsystem name too long"); 3872 return 0; 3873 } 3874 3875 int dev_vprintk_emit(int level, const struct device *dev, 3876 const char *fmt, va_list args) 3877 { 3878 char hdr[128]; 3879 size_t hdrlen; 3880 3881 hdrlen = create_syslog_header(dev, hdr, sizeof(hdr)); 3882 3883 return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args); 3884 } 3885 EXPORT_SYMBOL(dev_vprintk_emit); 3886 3887 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 3888 { 3889 va_list args; 3890 int r; 3891 3892 va_start(args, fmt); 3893 3894 r = dev_vprintk_emit(level, dev, fmt, args); 3895 3896 va_end(args); 3897 3898 return r; 3899 } 3900 EXPORT_SYMBOL(dev_printk_emit); 3901 3902 static void __dev_printk(const char *level, const struct device *dev, 3903 struct va_format *vaf) 3904 { 3905 if (dev) 3906 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", 3907 dev_driver_string(dev), dev_name(dev), vaf); 3908 else 3909 printk("%s(NULL device *): %pV", level, vaf); 3910 } 3911 3912 void dev_printk(const char *level, const struct device *dev, 3913 const char *fmt, ...) 3914 { 3915 struct va_format vaf; 3916 va_list args; 3917 3918 va_start(args, fmt); 3919 3920 vaf.fmt = fmt; 3921 vaf.va = &args; 3922 3923 __dev_printk(level, dev, &vaf); 3924 3925 va_end(args); 3926 } 3927 EXPORT_SYMBOL(dev_printk); 3928 3929 #define define_dev_printk_level(func, kern_level) \ 3930 void func(const struct device *dev, const char *fmt, ...) \ 3931 { \ 3932 struct va_format vaf; \ 3933 va_list args; \ 3934 \ 3935 va_start(args, fmt); \ 3936 \ 3937 vaf.fmt = fmt; \ 3938 vaf.va = &args; \ 3939 \ 3940 __dev_printk(kern_level, dev, &vaf); \ 3941 \ 3942 va_end(args); \ 3943 } \ 3944 EXPORT_SYMBOL(func); 3945 3946 define_dev_printk_level(_dev_emerg, KERN_EMERG); 3947 define_dev_printk_level(_dev_alert, KERN_ALERT); 3948 define_dev_printk_level(_dev_crit, KERN_CRIT); 3949 define_dev_printk_level(_dev_err, KERN_ERR); 3950 define_dev_printk_level(_dev_warn, KERN_WARNING); 3951 define_dev_printk_level(_dev_notice, KERN_NOTICE); 3952 define_dev_printk_level(_dev_info, KERN_INFO); 3953 3954 #endif 3955 3956 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) 3957 { 3958 return fwnode && !IS_ERR(fwnode->secondary); 3959 } 3960 3961 /** 3962 * set_primary_fwnode - Change the primary firmware node of a given device. 3963 * @dev: Device to handle. 3964 * @fwnode: New primary firmware node of the device. 3965 * 3966 * Set the device's firmware node pointer to @fwnode, but if a secondary 3967 * firmware node of the device is present, preserve it. 3968 */ 3969 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3970 { 3971 if (fwnode) { 3972 struct fwnode_handle *fn = dev->fwnode; 3973 3974 if (fwnode_is_primary(fn)) 3975 fn = fn->secondary; 3976 3977 if (fn) { 3978 WARN_ON(fwnode->secondary); 3979 fwnode->secondary = fn; 3980 } 3981 dev->fwnode = fwnode; 3982 } else { 3983 dev->fwnode = fwnode_is_primary(dev->fwnode) ? 3984 dev->fwnode->secondary : NULL; 3985 } 3986 } 3987 EXPORT_SYMBOL_GPL(set_primary_fwnode); 3988 3989 /** 3990 * set_secondary_fwnode - Change the secondary firmware node of a given device. 3991 * @dev: Device to handle. 3992 * @fwnode: New secondary firmware node of the device. 3993 * 3994 * If a primary firmware node of the device is present, set its secondary 3995 * pointer to @fwnode. Otherwise, set the device's firmware node pointer to 3996 * @fwnode. 3997 */ 3998 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) 3999 { 4000 if (fwnode) 4001 fwnode->secondary = ERR_PTR(-ENODEV); 4002 4003 if (fwnode_is_primary(dev->fwnode)) 4004 dev->fwnode->secondary = fwnode; 4005 else 4006 dev->fwnode = fwnode; 4007 } 4008 EXPORT_SYMBOL_GPL(set_secondary_fwnode); 4009 4010 /** 4011 * device_set_of_node_from_dev - reuse device-tree node of another device 4012 * @dev: device whose device-tree node is being set 4013 * @dev2: device whose device-tree node is being reused 4014 * 4015 * Takes another reference to the new device-tree node after first dropping 4016 * any reference held to the old node. 4017 */ 4018 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) 4019 { 4020 of_node_put(dev->of_node); 4021 dev->of_node = of_node_get(dev2->of_node); 4022 dev->of_node_reused = true; 4023 } 4024 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); 4025 4026 int device_match_name(struct device *dev, const void *name) 4027 { 4028 return sysfs_streq(dev_name(dev), name); 4029 } 4030 EXPORT_SYMBOL_GPL(device_match_name); 4031 4032 int device_match_of_node(struct device *dev, const void *np) 4033 { 4034 return dev->of_node == np; 4035 } 4036 EXPORT_SYMBOL_GPL(device_match_of_node); 4037 4038 int device_match_fwnode(struct device *dev, const void *fwnode) 4039 { 4040 return dev_fwnode(dev) == fwnode; 4041 } 4042 EXPORT_SYMBOL_GPL(device_match_fwnode); 4043 4044 int device_match_devt(struct device *dev, const void *pdevt) 4045 { 4046 return dev->devt == *(dev_t *)pdevt; 4047 } 4048 EXPORT_SYMBOL_GPL(device_match_devt); 4049 4050 int device_match_acpi_dev(struct device *dev, const void *adev) 4051 { 4052 return ACPI_COMPANION(dev) == adev; 4053 } 4054 EXPORT_SYMBOL(device_match_acpi_dev); 4055 4056 int device_match_any(struct device *dev, const void *unused) 4057 { 4058 return 1; 4059 } 4060 EXPORT_SYMBOL_GPL(device_match_any); 4061